# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: migration.py
# time: 2:31 PM
import subprocess
import logging
guide = """
╭─────────────────────────────────────────────────────────────────────────╮
│ ◎ ○ ○ ░░░░░░░░░░░░░░░░░░░░░ Important Message ░░░░░░░░░░░░░░░░░░░░░░░░│
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ We renamed again for consistency and clarity. │
│ From now on, it is all `kashgari`. │
│ Changelog: https://github.com/BrikerMan/Kashgari/releases/tag/v1.0.0 │
│ │
│ | Backend | pypi version | desc | │
│ | ---------------- | -------------- | -------------- | │
│ | TensorFlow 2.x | kashgari 2.x.x | coming soon | │
│ | TensorFlow 1.14+ | kashgari 1.x.x | | │
│ | Keras | kashgari 0.x.x | legacy version | │
│ │
╰─────────────────────────────────────────────────────────────────────────╯
"""
def show_migration_guide():
requirements = subprocess.getoutput("pip freeze")
for package in requirements.splitlines():
if '==' in package:
package_name, package_version = package.split('==')
if package_name == 'kashgari-tf':
logging.warning(guide)
if __name__ == "__main__":
show_migration_guide()
print("hello, world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/migeration.py | migeration.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: macros.py
@time: 2019-05-17 11:38
"""
import os
import logging
from pathlib import Path
import tensorflow as tf
DATA_PATH = os.path.join(str(Path.home()), '.kashgari')
Path(DATA_PATH).mkdir(exist_ok=True, parents=True)
class TaskType(object):
CLASSIFICATION = 'classification'
LABELING = 'labeling'
SCORING = 'scoring'
class Config(object):
def __init__(self):
self._use_cudnn_cell = False
self.disable_auto_summary = False
if tf.test.is_gpu_available(cuda_only=True):
logging.warning("CUDA GPU available, you can set `kashgari.config.use_cudnn_cell = True` to use CuDNNCell. "
"This will speed up the training, "
"but will make model incompatible with CPU device.")
@property
def use_cudnn_cell(self):
return self._use_cudnn_cell
@use_cudnn_cell.setter
def use_cudnn_cell(self, value):
self._use_cudnn_cell = value
        from pysoftNLP.kashgari.layers import L
if value:
if tf.test.is_gpu_available(cuda_only=True):
L.LSTM = tf.compat.v1.keras.layers.CuDNNLSTM
L.GRU = tf.compat.v1.keras.layers.CuDNNGRU
logging.warning("CuDNN enabled, this will speed up the training, "
"but will make model incompatible with CPU device.")
else:
logging.warning("Unable to use CuDNN cell, no GPU available.")
else:
L.LSTM = tf.keras.layers.LSTM
L.GRU = tf.keras.layers.GRU
def to_dict(self):
return {
'use_cudnn_cell': self.use_cudnn_cell
}
config = Config()
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/macros.py | macros.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __version__.py
# time: 2019-05-20 16:32
__version__ = '1.1.1'
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/__version__.py | __version__.py |
# encoding: utf-8
import os
import logging
import pandas as pd
from pysoftNLP.kashgari import macros as k
from typing import Tuple, List
from tensorflow.python.keras.utils import get_file
from pysoftNLP.kashgari import utils
CORPUS_PATH = os.path.join(k.DATA_PATH, 'corpus')
class DataReader(object):
@staticmethod
def read_conll_format_file(file_path: str,
text_index: int = 0,
label_index: int = 1) -> Tuple[List[List[str]], List[List[str]]]:
"""
Read conll format data_file
Args:
file_path: path of target file
text_index: index of text data, default 0
label_index: index of label data, default 1
        Returns:
            A tuple of (x_data, y_data), each a list of per-sentence token lists
        """
x_data, y_data = [], []
with open(file_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
x, y = [], []
for line in lines:
rows = line.split(' ')
if len(rows) == 1:
x_data.append(x)
y_data.append(y)
x = []
y = []
else:
x.append(rows[text_index])
y.append(rows[label_index])
return x_data, y_data
class ChineseDailyNerCorpus(object):
"""
    Chinese Daily News NER Corpus
https://github.com/zjy-ucas/ChineseNER/
"""
# __corpus_name__ = 'china-people-daily-ner-corpus'
    __corpus_name__ = r'D:\pysoftNLP_resources\ner\china-people-daily-ner-corpus'
__zip_file__name = 'http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
shuffle: bool = True) -> Tuple[List[List[str]], List[List[str]]]:
"""
Load dataset as sequence labeling format, char level tokenized
features: ``[['海', '钓', '比', '赛', '地', '点', '在', '厦', '门', ...], ...]``
labels: ``[['O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', ...], ...]``
Sample::
train_x, train_y = ChineseDailyNerCorpus.load_data('train')
test_x, test_y = ChineseDailyNerCorpus.load_data('test')
Args:
subset_name: {train, test, valid}
shuffle: should shuffle or not, default True.
Returns:
dataset_features and dataset labels
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
corpus_path = 'D:\pysoftNLP_resources\entity_recognition\china-people-daily-ner-corpus'
print(corpus_path)
if subset_name == 'train':
file_path = os.path.join(corpus_path, 'example.train')
elif subset_name == 'test':
file_path = os.path.join(corpus_path, 'example.test')
else:
file_path = os.path.join(corpus_path, 'example.dev')
x_data, y_data = DataReader.read_conll_format_file(file_path)
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {file_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
class CONLL2003ENCorpus(object):
__corpus_name__ = 'conll2003_en'
__zip_file__name = 'http://s3.bmio.net/kashgari/conll2003_en.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
task_name: str = 'ner',
shuffle: bool = True) -> Tuple[List[List[str]], List[List[str]]]:
"""
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
if subset_name not in {'train', 'test', 'valid'}:
raise ValueError()
file_path = os.path.join(corpus_path, f'{subset_name}.txt')
if task_name not in {'pos', 'chunking', 'ner'}:
raise ValueError()
data_index = ['pos', 'chunking', 'ner'].index(task_name) + 1
x_data, y_data = DataReader.read_conll_format_file(file_path, label_index=data_index)
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {file_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
class SMP2018ECDTCorpus(object):
"""
https://worksheets.codalab.org/worksheets/0x27203f932f8341b79841d50ce0fd684f/
    This dataset was released for task 1 of the Evaluation of Chinese Human-Computer
    Dialogue Technology (SMP2018-ECDT). It is a Chinese human-computer dialogue dataset
    provided by the iFLYTEK Corporation.
sample::
label query
0 weather 今天东莞天气如何
1 map 从观音桥到重庆市图书馆怎么走
2 cookbook 鸭蛋怎么腌?
3 health 怎么治疗牛皮癣
4 chat 唠什么
"""
__corpus_name__ = 'SMP2018ECDTCorpus'
__zip_file__name = 'http://s3.bmio.net/kashgari/SMP2018ECDTCorpus.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
shuffle: bool = True,
cutter: str = 'char') -> Tuple[List[List[str]], List[str]]:
"""
Load dataset as sequence classification format, char level tokenized
features: ``[['听', '新', '闻', '。'], ['电', '视', '台', '在', '播', '什', '么'], ...]``
labels: ``['news', 'epg', ...]``
Samples::
train_x, train_y = SMP2018ECDTCorpus.load_data('train')
test_x, test_y = SMP2018ECDTCorpus.load_data('test')
Args:
subset_name: {train, test, valid}
shuffle: should shuffle or not, default True.
cutter: sentence cutter, {char, jieba}
Returns:
dataset_features and dataset labels
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
if cutter not in ['char', 'jieba', 'none']:
            raise ValueError('cutter error, please use one of {char, jieba, none}')
df_path = os.path.join(corpus_path, f'{subset_name}.csv')
df = pd.read_csv(df_path)
if cutter == 'jieba':
try:
import jieba
except ModuleNotFoundError:
raise ModuleNotFoundError(
"please install jieba, `$ pip install jieba`")
x_data = [list(jieba.cut(item)) for item in df['query'].to_list()]
        else:  # 'char' (and 'none') fall back to char-level tokens
x_data = [list(item) for item in df['query'].to_list()]
y_data = df['label'].to_list()
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {df_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
if __name__ == "__main__":
a, b = CONLL2003ENCorpus.load_data()
print(a[:2])
print(b[:2])
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/corpus.py | corpus.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: callbacks.py
# time: 2019-05-22 15:00
from sklearn import metrics
from pysoftNLP.kashgari import macros
from tensorflow.python import keras
from pysoftNLP.kashgari.tasks.base_model import BaseModel
from seqeval import metrics as seq_metrics
class EvalCallBack(keras.callbacks.Callback):
def __init__(self, kash_model: BaseModel, valid_x, valid_y,
step=5, batch_size=256, average='weighted'):
"""
Evaluate callback, calculate precision, recall and f1
Args:
kash_model: the kashgari model to evaluate
valid_x: feature data
valid_y: label data
            step: evaluate every `step` epochs, default 5
            batch_size: batch size, default 256
            average: averaging strategy passed to the sklearn metrics, default 'weighted'
        """
super(EvalCallBack, self).__init__()
self.kash_model = kash_model
self.valid_x = valid_x
self.valid_y = valid_y
self.step = step
self.batch_size = batch_size
self.average = average
self.logs = []
def on_epoch_end(self, epoch, logs=None):
if (epoch + 1) % self.step == 0:
y_pred = self.kash_model.predict(self.valid_x, batch_size=self.batch_size)
if self.kash_model.task == macros.TaskType.LABELING:
y_true = [seq[:len(y_pred[index])] for index, seq in enumerate(self.valid_y)]
precision = seq_metrics.precision_score(y_true, y_pred)
recall = seq_metrics.recall_score(y_true, y_pred)
f1 = seq_metrics.f1_score(y_true, y_pred)
else:
y_true = self.valid_y
precision = metrics.precision_score(y_true, y_pred, average=self.average)
recall = metrics.recall_score(y_true, y_pred, average=self.average)
f1 = metrics.f1_score(y_true, y_pred, average=self.average)
self.logs.append({
'precision': precision,
'recall': recall,
'f1': f1
})
print(f"\nepoch: {epoch} precision: {precision:.6f}, recall: {recall:.6f}, f1: {f1:.6f}")
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/callbacks.py | callbacks.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: helpers.py
@time: 2019-05-17 11:37
"""
import json
import os
import pathlib
import pydoc
import random
import time
from typing import List, Optional, Dict, Union
import tensorflow as tf
from tensorflow.python import keras, saved_model
from pysoftNLP.kashgari import custom_objects
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.layers.crf import CRF
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
from pysoftNLP.kashgari.tasks.base_model import BaseModel
from pysoftNLP.kashgari.tasks.classification.base_model import BaseClassificationModel
from pysoftNLP.kashgari.tasks.labeling.base_model import BaseLabelingModel
def unison_shuffled_copies(a, b):
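    """Shuffle two equal-length sequences in unison so that (a[i], b[i]) pairs stay aligned."""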
assert len(a) == len(b)
c = list(zip(a, b))
random.shuffle(c)
a, b = zip(*c)
return list(a), list(b)
def get_list_subset(target: List, index_list: List[int]) -> List:
return [target[i] for i in index_list if i < len(target)]
def custom_object_scope():
return tf.keras.utils.custom_object_scope(custom_objects)
def load_model(model_path: str, load_weights: bool = True) -> Union[BaseClassificationModel, BaseLabelingModel]:
"""
    Load a model that was saved with the `model.save` function
Args:
model_path: model folder path
load_weights: only load model structure and vocabulary when set to False, default True.
Returns:
"""
with open(os.path.join(model_path, 'model_info.json'), 'r') as f:
model_info = json.load(f)
model_class = pydoc.locate(f"{model_info['module']}.{model_info['class_name']}")
model_json_str = json.dumps(model_info['tf_model'])
model = model_class()
model.tf_model = tf.keras.models.model_from_json(model_json_str, custom_objects)
if load_weights:
model.tf_model.load_weights(os.path.join(model_path, 'model_weights.h5'))
embed_info = model_info['embedding']
embed_class = pydoc.locate(f"{embed_info['module']}.{embed_info['class_name']}")
embedding: Embedding = embed_class._load_saved_instance(embed_info,
model_path,
model.tf_model)
model.embedding = embedding
if type(model.tf_model.layers[-1]) == CRF:
model.layer_crf = model.tf_model.layers[-1]
return model
def load_processor(model_path: str) -> BaseProcessor:
"""
Load processor from model
    When using tf-serving, we need the model's processor to pre-process data
Args:
model_path:
Returns:
"""
with open(os.path.join(model_path, 'model_info.json'), 'r') as f:
model_info = json.load(f)
processor_info = model_info['embedding']['processor']
processor_class = pydoc.locate(f"{processor_info['module']}.{processor_info['class_name']}")
processor: BaseProcessor = processor_class(**processor_info['config'])
return processor
def convert_to_saved_model(model: BaseModel,
model_path: str,
version: str = None,
inputs: Optional[Dict] = None,
outputs: Optional[Dict] = None):
"""
Export model for tensorflow serving
Args:
model: Target model
model_path: The path to which the SavedModel will be stored.
version: The model version code, default timestamp
inputs: dict mapping string input names to tensors. These are added
to the SignatureDef as the inputs.
outputs: dict mapping string output names to tensors. These are added
to the SignatureDef as the outputs.
"""
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
if version is None:
version = round(time.time())
export_path = os.path.join(model_path, str(version))
if inputs is None:
inputs = {i.name: i for i in model.tf_model.inputs}
if outputs is None:
outputs = {o.name: o for o in model.tf_model.outputs}
sess = keras.backend.get_session()
saved_model.simple_save(session=sess,
export_dir=export_path,
inputs=inputs,
outputs=outputs)
with open(os.path.join(export_path, 'model_info.json'), 'w') as f:
f.write(json.dumps(model.info(), indent=2, ensure_ascii=True))
f.close()
if __name__ == "__main__":
path = '/Users/brikerman/Desktop/python/Kashgari/tests/classification/saved_models/' \
'kashgari.tasks.classification.models/BiLSTM_Model'
p = load_processor(path)
print(p.process_x_dataset([list('语言模型')]))
print(p.label2idx)
print(p.token2idx)
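    # A hypothetical export sketch (assumes ``model`` is a trained kashgari model;
    # the SavedModel for tf-serving would land in ``saved_models/ner/<version>/``):
    #
    #   convert_to_saved_model(model, 'saved_models/ner', version='1')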
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/utils.py | utils.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: migration.py
# time: 2:31 PM
import subprocess
import logging
guide = """
╭─────────────────────────────────────────────────────────────────────────╮
│ ◎ ○ ○ ░░░░░░░░░░░░░░░░░░░░░ Important Message ░░░░░░░░░░░░░░░░░░░░░░░░│
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ We renamed again for consistency and clarity. │
│ From now on, it is all `kashgari`. │
│ Changelog: https://github.com/BrikerMan/Kashgari/releases/tag/v1.0.0 │
│ │
│ | Backend | pypi version | desc | │
│ | ---------------- | -------------- | -------------- | │
│ | TensorFlow 2.x | kashgari 2.x.x | coming soon | │
│ | TensorFlow 1.14+ | kashgari 1.x.x | | │
│ | Keras | kashgari 0.x.x | legacy version | │
│ │
╰─────────────────────────────────────────────────────────────────────────╯
"""
def show_migration_guide():
requirements = subprocess.getoutput("pip freeze")
for package in requirements.splitlines():
if '==' in package:
package_name, package_version = package.split('==')
if package_name == 'kashgari-tf':
logging.warning(guide)
if __name__ == "__main__":
show_migration_guide()
print("hello, world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/migration.py | migration.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __version__.py
# time: 2019-05-20 16:32
__version__ = '1.0.0'
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/version.py | version.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: __init__.py
@time: 2019-05-17 11:15
"""
import os
os.environ['TF_KERAS'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import keras_bert
from pysoftNLP.kashgari.macros import TaskType, config
custom_objects = keras_bert.get_custom_objects()
CLASSIFICATION = TaskType.CLASSIFICATION
LABELING = TaskType.LABELING
SCORING = TaskType.SCORING
from pysoftNLP.kashgari.__version__ import __version__
from pysoftNLP.kashgari import layers
from pysoftNLP.kashgari import corpus
from pysoftNLP.kashgari import embeddings
from pysoftNLP.kashgari import macros
from pysoftNLP.kashgari import processors
from pysoftNLP.kashgari import tasks
from pysoftNLP.kashgari import utils
from pysoftNLP.kashgari import callbacks
from pysoftNLP.kashgari import migration
migration.show_migration_guide()
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_tokenizer.py
# time: 11:24 AM
class Tokenizer:
"""Abstract base class for all implemented tokenizers.
"""
def tokenize(self, text: str):
"""
Tokenize text into token sequence
Args:
text: target text sample
Returns:
List of tokens in this sample
"""
return text.split(' ')
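if __name__ == "__main__":
    # A minimal sketch: the base tokenizer simply splits on single spaces.
    print(Tokenizer().tokenize('set an alarm for seven am'))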
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tokenizer/base_tokenizer.py | base_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bert_tokenizer.py
# time: 11:33 AM
# flake8: noqa: E127
import codecs
import os
import unicodedata
from pysoftNLP.kashgari.tokenizer.base_tokenizer import Tokenizer
TOKEN_PAD = '' # Token for padding
TOKEN_UNK = '[UNK]' # Token for unknown words
TOKEN_CLS = '[CLS]' # Token for classification
TOKEN_SEP = '[SEP]' # Token for separation
TOKEN_MASK = '[MASK]' # Token for masking
class BertTokenizer(Tokenizer):
"""
Bert Like Tokenizer, ref: https://github.com/CyberZHG/keras-bert/blob/master/keras_bert/tokenizer.py
"""
def __init__(self,
token_dict=None,
token_cls=TOKEN_CLS,
token_sep=TOKEN_SEP,
token_unk=TOKEN_UNK,
pad_index=0,
cased=False):
"""Initialize tokenizer.
:param token_dict: A dict maps tokens to indices.
:param token_cls: The token represents classification.
:param token_sep: The token represents separator.
:param token_unk: The token represents unknown token.
:param pad_index: The index to pad.
:param cased: Whether to keep the case.
"""
self._token_dict = token_dict
if self._token_dict:
self._token_dict_inv = {v: k for k, v in token_dict.items()}
else:
self._token_dict_inv = {}
self._token_cls = token_cls
self._token_sep = token_sep
self._token_unk = token_unk
self._pad_index = pad_index
self._cased = cased
@classmethod
def load_from_model(cls, model_path: str):
dict_path = os.path.join(model_path, 'vocab.txt')
token2idx = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
return BertTokenizer(token_dict=token2idx)
@classmethod
def load_from_vacab_file(cls, vacab_path: str):
token2idx = {}
with codecs.open(vacab_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
return BertTokenizer(token_dict=token2idx)
def tokenize(self, first):
"""Split text to tokens.
        :param first: Text to tokenize.
:return: A list of strings.
"""
tokens = self._tokenize(first)
return tokens
def _tokenize(self, text):
if not self._cased:
text = unicodedata.normalize('NFD', text)
text = ''.join([ch for ch in text if unicodedata.category(ch) != 'Mn'])
text = text.lower()
spaced = ''
for ch in text:
if self._is_punctuation(ch) or self._is_cjk_character(ch):
spaced += ' ' + ch + ' '
elif self._is_space(ch):
spaced += ' '
elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
continue
else:
spaced += ch
if self._token_dict:
tokens = []
for word in spaced.strip().split():
tokens += self._word_piece_tokenize(word)
return tokens
else:
return spaced.strip().split()
def _word_piece_tokenize(self, word):
if word in self._token_dict:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in self._token_dict:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
@staticmethod
def _is_punctuation(ch): # noqa: E127
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
@staticmethod
def _is_cjk_character(ch):
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
@staticmethod
def _is_space(ch):
return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or unicodedata.category(ch) == 'Zs'
@staticmethod
def _is_control(ch):
return unicodedata.category(ch) in ('Cc', 'Cf')
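if __name__ == "__main__":
    # A minimal sketch of word-piece splitting against a tiny, made-up vocabulary;
    # a real setup would call BertTokenizer.load_from_model(model_path) to read
    # the model's vocab.txt instead.
    toy_vocab = {'un': 0, '##aff': 1, '##able': 2, '[UNK]': 3}
    tokenizer = BertTokenizer(token_dict=toy_vocab)
    print(tokenizer.tokenize('unaffable'))  # ['un', '##aff', '##able']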
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tokenizer/bert_tokenizer.py | bert_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: jieba_tokenizer.py
# time: 11:54 AM
from pysoftNLP.kashgari.tokenizer.base_tokenizer import Tokenizer
class JiebaTokenizer(Tokenizer):
"""Jieba tokenizer
"""
def __init__(self):
try:
import jieba
self._jieba = jieba
except ModuleNotFoundError:
raise ModuleNotFoundError("Jieba module not found, please install use `pip install jieba`")
def tokenize(self, text: str, **kwargs):
"""
Tokenize text into token sequence
Args:
text: target text sample
Returns:
List of tokens in this sample
"""
return list(self._jieba.cut(text, **kwargs))
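if __name__ == "__main__":
    # A minimal sketch (requires `pip install jieba`); the exact segmentation
    # depends on jieba's dictionary.
    print(JiebaTokenizer().tokenize('今天天气不错'))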
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tokenizer/jieba_tokenizer.py | jieba_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 11:34 AM
from pysoftNLP.kashgari.tokenizer.base_tokenizer import Tokenizer
from pysoftNLP.kashgari.tokenizer.bert_tokenizer import BertTokenizer
from pysoftNLP.kashgari.tokenizer.jieba_tokenizer import JiebaTokenizer
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tokenizer/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 2019-05-22 11:21
import os
import json
import logging
import warnings
import pathlib
from typing import Dict, Any, List, Optional, Union, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari import utils
from pysoftNLP.kashgari.embeddings import BareEmbedding
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
L = keras.layers
class BaseModel(object):
"""Base Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def info(self):
model_json_str = self.tf_model.to_json()
return {
'config': {
'hyper_parameters': self.hyper_parameters,
},
'tf_model': json.loads(model_json_str),
'embedding': self.embedding.info(),
'class_name': self.__class__.__name__,
'module': self.__class__.__module__,
'tf_version': tf.__version__,
'kashgari_version': kashgari.__version__
}
@property
def task(self):
return self.embedding.task
@property
def token2idx(self) -> Dict[str, int]:
return self.embedding.token2idx
@property
def label2idx(self) -> Dict[str, int]:
return self.embedding.label2idx
@property
def pre_processor(self):
        """Deprecated. Use `self.processor` instead."""
        warnings.warn("The 'pre_processor' property is deprecated, "
                      "use 'processor' instead", DeprecationWarning, 2)
        return self.embedding.processor
@property
def processor(self):
return self.embedding.processor
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
"""
Args:
embedding: model embedding
hyper_parameters: a dict of hyper_parameters.
Examples:
            You could customize hyper_parameters like this::
# get default hyper_parameters
hyper_parameters = BLSTMModel.get_default_hyper_parameters()
# change lstm hidden unit to 12
hyper_parameters['layer_blstm']['units'] = 12
# init new model with customized hyper_parameters
labeling_model = BLSTMModel(hyper_parameters=hyper_parameters)
labeling_model.fit(x, y)
"""
if embedding is None:
self.embedding = BareEmbedding(task=self.__task__)
else:
self.embedding = embedding
self.tf_model: keras.Model = None
self.hyper_parameters = self.get_default_hyper_parameters()
self.model_info = {}
if hyper_parameters:
self.hyper_parameters.update(hyper_parameters)
def build_model(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build model with corpus
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
self.build_model_arc()
self.compile_model()
def build_multi_gpu_model(self,
gpus: int,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
cpu_merge: bool = True,
cpu_relocation: bool = False,
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build multi-GPU model with corpus
Args:
            gpus: Integer >= 2, number of GPUs on which to create model replicas.
cpu_merge: A boolean value to identify whether to force merging model weights
under the scope of the CPU or not.
cpu_relocation: A boolean value to identify whether to create the model's weights
under the scope of the CPU. If the model is not defined under any preceding device
scope, you can still rescue it by activating this option.
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
with utils.custom_object_scope():
self.build_model_arc()
self.tf_model = tf.keras.utils.multi_gpu_model(self.tf_model,
gpus,
cpu_merge=cpu_merge,
cpu_relocation=cpu_relocation)
self.compile_model()
def build_tpu_model(self, strategy: tf.contrib.distribute.TPUStrategy,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build TPU model with corpus
Args:
strategy: `TPUDistributionStrategy`. The strategy to use for replicating model
across multiple TPU cores.
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
with utils.custom_object_scope():
self.build_model_arc()
self.tf_model = tf.contrib.tpu.keras_to_tpu_model(self.tf_model, strategy=strategy)
self.compile_model(optimizer=tf.train.AdamOptimizer())
def get_data_generator(self,
x_data,
y_data,
batch_size: int = 64,
shuffle: bool = True):
"""
data generator for fit_generator
Args:
x_data: Array of feature data (if the model has a single input),
or tuple of feature data array (if the model has multiple inputs)
y_data: Array of label data
batch_size: Number of samples per gradient update, default to 64.
shuffle:
Returns:
data generator
"""
index_list = np.arange(len(x_data))
page_count = len(x_data) // batch_size + 1
while True:
if shuffle:
np.random.shuffle(index_list)
for page in range(page_count):
start_index = page * batch_size
end_index = start_index + batch_size
target_index = index_list[start_index: end_index]
if len(target_index) == 0:
target_index = index_list[0: batch_size]
x_tensor = self.embedding.process_x_dataset(x_data,
target_index)
y_tensor = self.embedding.process_y_dataset(y_data,
target_index)
yield (x_tensor, y_tensor)
def fit(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None,
batch_size: int = 64,
epochs: int = 5,
callbacks: List[keras.callbacks.Callback] = None,
fit_kwargs: Dict = None,
shuffle: bool = True):
"""
Trains the model for a given number of epochs with fit_generator (iterations on a dataset).
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
batch_size: Number of samples per gradient update, default to 64.
epochs: Integer. Number of epochs to train the model. default 5.
callbacks:
            fit_kwargs: additional arguments passed to ``fit_generator()`` function from
``tensorflow.keras.Model``
- https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#fit_generator
shuffle:
Returns:
"""
self.build_model(x_train, y_train, x_validate, y_validate)
train_generator = self.get_data_generator(x_train,
y_train,
batch_size,
shuffle)
if fit_kwargs is None:
fit_kwargs = {}
validation_generator = None
validation_steps = None
if x_validate:
validation_generator = self.get_data_generator(x_validate,
y_validate,
batch_size,
shuffle)
if isinstance(x_validate, tuple):
validation_steps = len(x_validate[0]) // batch_size + 1
else:
validation_steps = len(x_validate) // batch_size + 1
if isinstance(x_train, tuple):
steps_per_epoch = len(x_train[0]) // batch_size + 1
else:
steps_per_epoch = len(x_train) // batch_size + 1
with utils.custom_object_scope():
return self.tf_model.fit_generator(train_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=callbacks,
**fit_kwargs)
def fit_without_generator(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None,
batch_size: int = 64,
epochs: int = 5,
callbacks: List[keras.callbacks.Callback] = None,
fit_kwargs: Dict = None):
"""
Trains the model for a given number of epochs (iterations on a dataset).
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
batch_size: Number of samples per gradient update, default to 64.
epochs: Integer. Number of epochs to train the model. default 5.
callbacks:
            fit_kwargs: additional arguments passed to ``fit()`` function from
                ``tensorflow.keras.Model``
                - https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#fit
Returns:
"""
self.build_model(x_train, y_train, x_validate, y_validate)
tensor_x = self.embedding.process_x_dataset(x_train)
tensor_y = self.embedding.process_y_dataset(y_train)
validation_data = None
if x_validate is not None:
tensor_valid_x = self.embedding.process_x_dataset(x_validate)
tensor_valid_y = self.embedding.process_y_dataset(y_validate)
validation_data = (tensor_valid_x, tensor_valid_y)
if fit_kwargs is None:
fit_kwargs = {}
if callbacks and 'callbacks' not in fit_kwargs:
fit_kwargs['callbacks'] = callbacks
with utils.custom_object_scope():
return self.tf_model.fit(tensor_x, tensor_y,
validation_data=validation_data,
epochs=epochs,
batch_size=batch_size,
**fit_kwargs)
def compile_model(self, **kwargs):
"""Configures the model for training.
Using ``compile()`` function of ``tf.keras.Model`` -
https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#compile
Args:
**kwargs: arguments passed to ``compile()`` function of ``tf.keras.Model``
Defaults:
- loss: ``categorical_crossentropy``
- optimizer: ``adam``
- metrics: ``['accuracy']``
"""
if kwargs.get('loss') is None:
kwargs['loss'] = 'categorical_crossentropy'
if kwargs.get('optimizer') is None:
kwargs['optimizer'] = 'adam'
if kwargs.get('metrics') is None:
kwargs['metrics'] = ['accuracy']
self.tf_model.compile(**kwargs)
if not kashgari.config.disable_auto_summary:
self.tf_model.summary()
def predict(self,
x_data,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
if predict_kwargs is None:
predict_kwargs = {}
with utils.custom_object_scope():
if isinstance(x_data, tuple):
lengths = [len(sen) for sen in x_data[0]]
else:
lengths = [len(sen) for sen in x_data]
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
if self.task == 'scoring':
t_pred = pred
else:
t_pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(t_pred,
lengths)
if debug_info:
print('input: {}'.format(tensor))
print('output: {}'.format(pred))
print('output argmax: {}'.format(t_pred))
return res
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
debug_info=False) -> Tuple[float, float, Dict]:
"""
Evaluate model
Args:
x_data:
y_data:
batch_size:
digits:
debug_info:
Returns:
"""
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def save(self, model_path: str):
"""
Save model
Args:
model_path:
Returns:
"""
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
with open(os.path.join(model_path, 'model_info.json'), 'w') as f:
f.write(json.dumps(self.info(), indent=2, ensure_ascii=True))
f.close()
self.tf_model.save_weights(os.path.join(model_path, 'model_weights.h5'))
logging.info('model saved to {}'.format(os.path.abspath(model_path)))
if __name__ == "__main__":
from kashgari.tasks.labeling import CNN_LSTM_Model
from kashgari.corpus import ChineseDailyNerCorpus
train_x, train_y = ChineseDailyNerCorpus.load_data('valid')
model = CNN_LSTM_Model()
model.build_model(train_x[:100], train_y[:100])
r = model.predict_entities(train_x[:5])
model.save('./res')
import pprint
pprint.pprint(r)
model.evaluate(train_x[:20], train_y[:20])
print("Hello world")
print(model.predict(train_x[:20]))
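    # A hedged sketch of customizing hyper-parameters before training, mirroring
    # the example in `BaseModel.__init__`'s docstring:
    #
    #   hyper = CNN_LSTM_Model.get_default_hyper_parameters()
    #   hyper['layer_lstm']['units'] = 32
    #   model = CNN_LSTM_Model(hyper_parameters=hyper)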
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-20 11:34
from pysoftNLP.kashgari.tasks import labeling
from pysoftNLP.kashgari.tasks import classification
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 2019-05-20 13:07
from typing import Dict, Any, Tuple
import random
import logging
from seqeval.metrics import classification_report
from seqeval.metrics.sequence_labeling import get_entities
from pysoftNLP.kashgari.tasks.base_model import BaseModel
class BaseLabelingModel(BaseModel):
"""Base Sequence Labeling Model"""
__task__ = 'labeling'
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def predict_entities(self,
x_data,
batch_size=None,
join_chunk=' ',
debug_info=False,
predict_kwargs: Dict = None):
"""Gets entities from sequence.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
            join_chunk: str or False, delimiter used to join entity tokens; False keeps the raw token list
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
list: list of entity.
"""
if isinstance(x_data, tuple):
text_seq = x_data[0]
else:
text_seq = x_data
res = self.predict(x_data, batch_size, debug_info, predict_kwargs)
new_res = [get_entities(seq) for seq in res]
final_res = []
for index, seq in enumerate(new_res):
seq_data = []
for entity in seq:
if join_chunk is False:
                    value = text_seq[index][entity[1]:entity[2] + 1]
else:
value = join_chunk.join(text_seq[index][entity[1]:entity[2] + 1])
seq_data.append({
"entity": entity[0],
"start": entity[1],
"end": entity[2],
"value": value,
})
final_res.append({
'text': join_chunk.join(text_seq[index]),
'text_raw': text_seq[index],
'labels': seq_data
})
return final_res
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
debug_info=False) -> Tuple[float, float, Dict]:
"""
Build a text report showing the main classification metrics.
Args:
x_data:
y_data:
batch_size:
digits:
debug_info:
Returns:
"""
y_pred = self.predict(x_data, batch_size=batch_size)
y_true = [seq[:len(y_pred[index])] for index, seq in enumerate(y_data)]
new_y_pred = []
for x in y_pred:
new_y_pred.append([str(i) for i in x])
new_y_true = []
for x in y_true:
new_y_true.append([str(i) for i in x])
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y_true : {}'.format(y_true[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
        report = classification_report(new_y_true, new_y_pred, digits=digits)
        print(report)
return report
def build_model_arc(self):
raise NotImplementedError
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
from kashgari.tasks.labeling import BiLSTM_Model
from kashgari.corpus import ChineseDailyNerCorpus
from kashgari.utils import load_model
train_x, train_y = ChineseDailyNerCorpus.load_data('train', shuffle=False)
valid_x, valid_y = ChineseDailyNerCorpus.load_data('valid')
train_x, train_y = train_x[:5120], train_y[:5120]
model = load_model('/Users/brikerman/Desktop/blstm_model')
# model.build_model(train_x[:100], train_y[:100])
# model.fit(train_x[:1000], train_y[:1000], epochs=10)
# model.evaluate(train_x[:20], train_y[:20])
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/labeling/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: experimental.py
# time: 2019-05-22 19:35
from typing import Dict, Any
from tensorflow import keras
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari.tasks.labeling.base_model import BaseLabelingModel
from pysoftNLP.kashgari.layers import L
from keras_self_attention import SeqSelfAttention
class BLSTMAttentionModel(BaseLabelingModel):
"""Bidirectional LSTM Self Attention Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 64,
'return_sequences': True
},
'layer_self_attention': {
'attention_activation': 'sigmoid'
},
'layer_dropout': {
'rate': 0.5
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        Build the model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_self_attention = SeqSelfAttention(**config['layer_self_attention'],
name='layer_self_attention')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_self_attention(tensor)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
# Register custom layer
kashgari.custom_objects['SeqSelfAttention'] = SeqSelfAttention
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/labeling/experimental.py | experimental.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-20 11:34
from pysoftNLP.kashgari.tasks.labeling.models import CNN_LSTM_Model
from pysoftNLP.kashgari.tasks.labeling.models import BiLSTM_Model
from pysoftNLP.kashgari.tasks.labeling.models import BiLSTM_CRF_Model
from pysoftNLP.kashgari.tasks.labeling.models import BiGRU_Model
from pysoftNLP.kashgari.tasks.labeling.models import BiGRU_CRF_Model
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/labeling/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 2019-05-20 11:13
import logging
from typing import Dict, Any
from tensorflow import keras
from pysoftNLP.kashgari.tasks.labeling.base_model import BaseLabelingModel
from pysoftNLP.kashgari.layers import L
from pysoftNLP.kashgari.layers.crf import CRF
from pysoftNLP.kashgari.utils import custom_objects
custom_objects['CRF'] = CRF
class BiLSTM_Model(BaseLabelingModel):
"""Bidirectional LSTM Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        Build the model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
class BiLSTM_CRF_Model(BaseLabelingModel):
"""Bidirectional LSTM CRF Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 128,
'return_sequences': True
},
'layer_dense': {
'units': 64,
'activation': 'tanh'
}
}
def build_model_arc(self):
"""
        Build the model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_dense = L.Dense(**config['layer_dense'], name='layer_dense')
layer_crf_dense = L.Dense(output_dim, name='layer_crf_dense')
layer_crf = CRF(output_dim, name='layer_crf')
tensor = layer_blstm(embed_model.output)
tensor = layer_dense(tensor)
tensor = layer_crf_dense(tensor)
output_tensor = layer_crf(tensor)
self.layer_crf = layer_crf
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = self.layer_crf.loss
if kwargs.get('metrics') is None:
kwargs['metrics'] = [self.layer_crf.viterbi_accuracy]
super(BiLSTM_CRF_Model, self).compile_model(**kwargs)
class BiGRU_Model(BaseLabelingModel):
"""Bidirectional GRU Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_bgru': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        Build the model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.GRU(**config['layer_bgru']),
name='layer_bgru')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
class BiGRU_CRF_Model(BaseLabelingModel):
"""Bidirectional GRU CRF Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_bgru': {
'units': 128,
'return_sequences': True
},
'layer_dense': {
'units': 64,
'activation': 'tanh'
}
}
def build_model_arc(self):
"""
        Build the model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.GRU(**config['layer_bgru']),
name='layer_bgru')
layer_dense = L.Dense(**config['layer_dense'], name='layer_dense')
layer_crf_dense = L.Dense(output_dim, name='layer_crf_dense')
layer_crf = CRF(output_dim, name='layer_crf')
tensor = layer_blstm(embed_model.output)
tensor = layer_dense(tensor)
tensor = layer_crf_dense(tensor)
output_tensor = layer_crf(tensor)
self.layer_crf = layer_crf
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = self.layer_crf.loss
if kwargs.get('metrics') is None:
kwargs['metrics'] = [self.layer_crf.viterbi_accuracy]
super(BiGRU_CRF_Model, self).compile_model(**kwargs)
class CNN_LSTM_Model(BaseLabelingModel):
"""CNN LSTM Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_conv': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'layer_lstm': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        Build the model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_conv = L.Conv1D(**config['layer_conv'],
name='layer_conv')
layer_lstm = L.LSTM(**config['layer_lstm'],
name='layer_lstm')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_conv(embed_model.output)
tensor = layer_lstm(tensor)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
from kashgari.corpus import ChineseDailyNerCorpus
valid_x, valid_y = ChineseDailyNerCorpus.load_data('train')
model = BiLSTM_CRF_Model()
model.fit(valid_x, valid_y, epochs=50, batch_size=64)
model.evaluate(valid_x, valid_y)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/labeling/models.py | models.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_classification_model.py
# time: 2019-05-22 11:23
import random
import logging
import pysoftNLP.kashgari as kashgari
from typing import Dict, Any, Tuple, Optional, List
from pysoftNLP.kashgari.tasks.base_model import BaseModel, BareEmbedding
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from sklearn import metrics
class BaseClassificationModel(BaseModel):
__task__ = 'classification'
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
super(BaseClassificationModel, self).__init__(embedding, hyper_parameters)
if hyper_parameters is None and \
self.embedding.processor.__getattribute__('multi_label') is True:
last_layer_name = list(self.hyper_parameters.keys())[-1]
self.hyper_parameters[last_layer_name]['activation'] = 'sigmoid'
logging.warning("Activation Layer's activate function changed to sigmoid for"
" multi-label classification question")
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None and self.embedding.processor.multi_label:
kwargs['loss'] = 'binary_crossentropy'
super(BaseClassificationModel, self).compile_model(**kwargs)
def predict(self,
x_data,
batch_size=32,
multi_label_threshold: float = 0.5,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
multi_label_threshold:
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
        if predict_kwargs is None:
            predict_kwargs = {}
        with kashgari.utils.custom_object_scope():
            tensor = self.embedding.process_x_dataset(x_data)
            pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
if self.embedding.processor.multi_label:
if debug_info:
logging.info('raw output: {}'.format(pred))
pred[pred >= multi_label_threshold] = 1
pred[pred < multi_label_threshold] = 0
else:
pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(pred)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return res
def predict_top_k_class(self,
x_data,
top_k=5,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None) -> List[Dict]:
"""
Generates output predictions with confidence for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
top_k: int
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
single-label classification:
[
{
"label": "chat",
"confidence": 0.5801531,
"candidates": [
{ "label": "cookbook", "confidence": 0.1886314 },
{ "label": "video", "confidence": 0.13805099 },
{ "label": "health", "confidence": 0.013852648 },
{ "label": "translation", "confidence": 0.012913573 }
]
}
]
multi-label classification:
[
{
"candidates": [
{ "confidence": 0.9959336, "label": "toxic" },
{ "confidence": 0.9358089, "label": "obscene" },
{ "confidence": 0.6882098, "label": "insult" },
{ "confidence": 0.13540423, "label": "severe_toxic" },
{ "confidence": 0.017219543, "label": "identity_hate" }
]
}
]
"""
if predict_kwargs is None:
predict_kwargs = {}
with kashgari.utils.custom_object_scope():
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
new_results = []
for sample_prob in pred:
sample_res = zip(self.label2idx.keys(), sample_prob)
sample_res = sorted(sample_res, key=lambda k: k[1], reverse=True)
data = {}
for label, confidence in sample_res[:top_k]:
if 'candidates' not in data:
if self.embedding.processor.multi_label:
data['candidates'] = []
else:
data['label'] = label
data['confidence'] = confidence
data['candidates'] = []
continue
data['candidates'].append({
'label': label,
'confidence': confidence
})
new_results.append(data)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return new_results
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
output_dict=False,
debug_info=False) -> Optional[Tuple[float, float, Dict]]:
y_pred = self.predict(x_data, batch_size=batch_size)
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y : {}'.format(y_data[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
if self.processor.multi_label:
y_pred_b = self.processor.multi_label_binarizer.fit_transform(y_pred)
y_true_b = self.processor.multi_label_binarizer.fit_transform(y_data)
report = metrics.classification_report(y_pred_b,
y_true_b,
target_names=self.processor.multi_label_binarizer.classes_,
output_dict=output_dict,
digits=digits)
else:
report = metrics.classification_report(y_data,
y_pred,
output_dict=output_dict,
digits=digits)
if not output_dict:
print(report)
else:
return report
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/classification/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-22 12:40
from pysoftNLP.kashgari.tasks.classification.models import BiLSTM_Model
from pysoftNLP.kashgari.tasks.classification.models import BiGRU_Model
from pysoftNLP.kashgari.tasks.classification.models import CNN_Model
from pysoftNLP.kashgari.tasks.classification.models import CNN_LSTM_Model
from pysoftNLP.kashgari.tasks.classification.models import CNN_GRU_Model
from pysoftNLP.kashgari.tasks.classification.models import AVCNN_Model
from pysoftNLP.kashgari.tasks.classification.models import KMax_CNN_Model
from pysoftNLP.kashgari.tasks.classification.models import R_CNN_Model
from pysoftNLP.kashgari.tasks.classification.models import AVRNN_Model
from pysoftNLP.kashgari.tasks.classification.models import Dropout_BiGRU_Model
from pysoftNLP.kashgari.tasks.classification.models import Dropout_AVRNN_Model
from pysoftNLP.kashgari.tasks.classification.dpcnn_model import DPCNN_Model
BLSTMModel = BiLSTM_Model
BGRUModel = BiGRU_Model
CNNModel = CNN_Model
CNNLSTMModel = CNN_LSTM_Model
CNNGRUModel = CNN_GRU_Model
AVCNNModel = AVCNN_Model
KMaxCNNModel = KMax_CNN_Model
RCNNModel = R_CNN_Model
AVRNNModel = AVRNN_Model
DropoutBGRUModel = Dropout_BiGRU_Model
DropoutAVRNNModel = Dropout_AVRNN_Model
DPCNN = DPCNN_Model
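# A minimal, hypothetical lookup sketch (not part of the public API): it maps a few of the alias
# names above to their classes, which can be handy when the model type comes from a config file.
_EXAMPLE_MODEL_REGISTRY = {
    'bi_lstm': BiLSTM_Model,
    'bi_gru': BiGRU_Model,
    'cnn': CNN_Model,
    'dpcnn': DPCNN_Model,
}
def _example_get_model_class(name: str):
    """Return the model class registered under ``name`` (illustrative helper only)."""
    return _EXAMPLE_MODEL_REGISTRY[name]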
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/classification/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 2019-05-22 11:26
import logging
import tensorflow as tf
from typing import Dict, Any
from pysoftNLP.kashgari.layers import L, AttentionWeightedAverageLayer, KMaxPoolingLayer
from pysoftNLP.kashgari.tasks.classification.base_model import BaseClassificationModel
class BiLSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_lstm(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_gru': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_gru = L.Bidirectional(L.GRU(**config['layer_bi_gru']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_gru(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv1d_layer': {
'filters': 128,
'kernel_size': 5,
'activation': 'relu'
},
'max_pool_layer': {},
'dense_layer': {
'units': 64,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
# build model structure in sequent way
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv1d_layer']))
layers_seq.append(L.GlobalMaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.Dense(**config['dense_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class CNN_LSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'lstm_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.LSTM(**config['lstm_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class CNN_GRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'gru_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.GRU(**config['gru_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class AVCNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'conv_0': {
'filters': 300,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 300,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 300,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 300,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
# ---
'attn_0': {},
'avg_0': {},
'maxpool_0': {},
# ---
'maxpool_1': {},
'attn_1': {},
'avg_1': {},
# ---
'maxpool_2': {},
'attn_2': {},
'avg_2': {},
# ---
'maxpool_3': {},
'attn_3': {},
'avg_3': {},
# ---
'v_col3': {
# 'mode': 'concat',
'axis': 1
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.7
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_view = L.Concatenate(**config['v_col3'])
layer_allviews = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
tensors_matrix_sensor = []
for tensor_conv in tensors_conv:
tensor_sensors = []
tensor_sensors = [layer_sensor(tensor_conv) for layer_sensor in layers_sensor]
# tensor_sensors.append(L.GlobalMaxPooling1D()(tensor_conv))
# tensor_sensors.append(AttentionWeightedAverageLayer()(tensor_conv))
# tensor_sensors.append(L.GlobalAveragePooling1D()(tensor_conv))
tensors_matrix_sensor.append(tensor_sensors)
tensors_views = [layer_view(list(tensors)) for tensors in zip(*tensors_matrix_sensor)]
tensor = layer_allviews(tensors_views)
# tensors_v_cols = [L.concatenate(tensors, **config['v_col3']) for tensors
# in zip(*tensors_matrix_sensor)]
# tensor = L.concatenate(tensors_v_cols, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class KMax_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'conv_0': {
'filters': 180,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 180,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 180,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 180,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'maxpool_i4': {
'k': 3
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.6
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = [KMaxPoolingLayer(**config['maxpool_i4']),
L.Flatten()]
layer_concat = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
tensors_sensor = []
for tensor_conv in tensors_conv:
tensor_sensor = tensor_conv
for layer_sensor in layers_sensor:
tensor_sensor = layer_sensor(tensor_sensor)
tensors_sensor.append(tensor_sensor)
tensor = layer_concat(tensors_sensor)
# tensor = L.concatenate(tensors_sensor, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class R_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'conv_0': {
'filters': 128,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu',
'strides': 1
},
'maxpool': {},
'attn': {},
'average': {},
'concat': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 120,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rcnn_seq = []
layers_rcnn_seq.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rcnn_seq.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rcnn_seq.append(L.Conv1D(**config['conv_0']))
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_concat = L.Concatenate(**config['concat'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_rcnn_seq:
tensor = layer(tensor)
tensors_sensor = [layer(tensor) for layer in layers_sensor]
tensor_output = layer_concat(tensors_sensor)
# tensor_output = L.concatenate(tensor_sensors, **config['concat'])
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 60,
'return_sequences': True
},
'rnn_1': {
'units': 60,
'return_sequences': True
},
'concat_rnn': {
'axis': 2
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn0 = []
layers_rnn0.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn0.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layer_bi_rnn1 = L.Bidirectional(L.GRU(**config['rnn_1']))
layer_concat = L.Concatenate(**config['concat_rnn'])
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn0:
tensor_rnn = layer(tensor_rnn)
tensor_concat = layer_concat([tensor_rnn, layer_bi_rnn1(tensor_rnn)])
tensor_sensors = [layer(tensor_concat) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.15
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'dropout_rnn': {
'rate': 0.35
},
'rnn_1': {
'units': 64,
'return_sequences': True
},
'last': {},
'maxpool': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 72,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.Dropout(**config['dropout_rnn']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 56,
'return_sequences': True
},
'rnn_dropout': {
'rate': 0.3
},
'rnn_1': {
'units': 56,
'return_sequences': True
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout_0': {
'rate': 0.5
},
'dense': {
'units': 128,
'activation': 'relu'
},
'dropout_1': {
'rate': 0.25
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.SpatialDropout1D(**config['rnn_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout_0']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dropout(**config['dropout_1']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
if __name__ == "__main__":
print(BiLSTM_Model.get_default_hyper_parameters())
logging.basicConfig(level=logging.DEBUG)
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
import kashgari
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.embeddings import BareEmbedding
processor = ClassificationProcessor(multi_label=False)
embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=30, processor=processor)
m = BiLSTM_Model(embed)
# m.build_model(x, y)
m.fit(x, y, epochs=2)
print(m.predict(x[:10]))
# m.evaluate(x, y)
print(m.predict_top_k_class(x[:10]))
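    # Hedged sketch: the hyper-parameters returned by `get_default_hyper_parameters()` are plain
    # nested dicts, so they can be copied and overridden before a model is built. The value of
    # 256 units below is purely illustrative, not a recommendation.
    custom_hyper = BiLSTM_Model.get_default_hyper_parameters()
    custom_hyper['layer_bi_lstm']['units'] = 256
    print(custom_hyper)
    # Building with them would look roughly like the line below (kept commented, since passing
    # `hyper_parameters` to the constructor is assumed rather than shown in this file):
    # m2 = BiLSTM_Model(embed, hyper_parameters=custom_hyper)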
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/classification/models.py | models.py |
# encoding: utf-8
# author: Alex
# contact: ialexwwang@gmail.com
# version: 0.1
# license: Apache Licence
# file: dpcnn_model.py
# time: 2019-07-02 19:15
# Reference:
# https://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf
# https://github.com/Cheneng/DPCNN
# https://github.com/miracleyoo/DPCNN-TextCNN-Pytorch-Inception
# https://www.kaggle.com/michaelsnell/conv1d-dpcnn-in-keras
from math import log2, floor
from typing import Dict, Any
import tensorflow as tf
from pysoftNLP.kashgari.layers import L, KMaxPoolingLayer
from pysoftNLP.kashgari.tasks.classification.base_model import BaseClassificationModel
class DPCNN_Model(BaseClassificationModel):
'''
    This implementation of DPCNN requires an explicitly declared sequence length,
    so input sequences should be padded or truncated to a fixed length in advance.
'''
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
pool_type = 'max'
filters = 250
activation = 'linear'
return {
'region_embedding': {
'filters': filters,
'kernel_size': 3,
'strides': 1,
'padding': 'same',
'activation': activation,
'name': 'region_embedding',
},
'region_dropout': {
'rate': 0.2,
},
'conv_block': {
'filters': filters,
'kernel_size': 3,
'activation': activation,
'shortcut': True,
},
'resnet_block': {
'filters': filters,
'kernel_size': 3,
'activation': activation,
'shortcut': True,
'pool_type': pool_type,
'sorted': True,
},
'dense': {
'units': 256,
'activation': activation,
},
'dropout': {
'rate': 0.5,
},
'activation': {
'activation': 'softmax',
}
}
def downsample(self, inputs, pool_type: str = 'max',
sorted: bool = True, stage: int = 1): # noqa: A002
layers_pool = []
if pool_type == 'max':
layers_pool.append(
L.MaxPooling1D(pool_size=3,
strides=2,
padding='same',
name=f'pool_{stage}'))
elif pool_type == 'k_max':
k = int(inputs.shape[1].value / 2)
layers_pool.append(
KMaxPoolingLayer(k=k,
sorted=sorted,
name=f'pool_{stage}'))
elif pool_type == 'conv':
layers_pool.append(
L.Conv1D(filters=inputs.shape[-1].value,
kernel_size=3,
strides=2,
padding='same',
name=f'pool_{stage}'))
layers_pool.append(
L.BatchNormalization())
elif pool_type is None:
layers_pool = []
else:
raise ValueError(f'unsupported pooling type `{pool_type}`!')
tensor_out = inputs
for layer in layers_pool:
tensor_out = layer(tensor_out)
return tensor_out
def conv_block(self, inputs, filters: int, kernel_size: int = 3,
activation: str = 'linear', shortcut: bool = True):
layers_conv_unit = []
layers_conv_unit.append(
L.BatchNormalization())
layers_conv_unit.append(
L.PReLU())
layers_conv_unit.append(
L.Conv1D(filters=filters,
kernel_size=kernel_size,
strides=1,
padding='same',
activation=activation))
layers_conv_block = layers_conv_unit * 2
tensor_out = inputs
for layer in layers_conv_block:
tensor_out = layer(tensor_out)
if shortcut:
tensor_out = L.Add()([inputs, tensor_out])
return tensor_out
def resnet_block(self, inputs, filters: int, kernel_size: int = 3,
activation: str = 'linear', shortcut: bool = True,
pool_type: str = 'max', sorted: bool = True, stage: int = 1): # noqa: A002
tensor_pool = self.downsample(inputs, pool_type=pool_type, sorted=sorted, stage=stage)
tensor_out = self.conv_block(tensor_pool, filters=filters, kernel_size=kernel_size,
activation=activation, shortcut=shortcut)
return tensor_out
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_region = [
L.Conv1D(**config['region_embedding']),
L.BatchNormalization(),
L.PReLU(),
L.Dropout(**config['region_dropout'])
]
layers_main = [
L.GlobalMaxPooling1D(),
L.Dense(**config['dense']),
L.BatchNormalization(),
L.PReLU(),
L.Dropout(**config['dropout']),
L.Dense(output_dim, **config['activation'])
]
tensor_out = embed_model.output
# build region tensors
for layer in layers_region:
tensor_out = layer(tensor_out)
# build the base pyramid layer
tensor_out = self.conv_block(tensor_out, **config['conv_block'])
# build the above pyramid layers while `steps > 2`
seq_len = tensor_out.shape[1].value
if seq_len is None:
raise ValueError('`sequence_length` should be explicitly assigned, but it is `None`.')
for i in range(floor(log2(seq_len)) - 2):
tensor_out = self.resnet_block(tensor_out, stage=i + 1,
**config['resnet_block'])
for layer in layers_main:
tensor_out = layer(tensor_out)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_out)
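if __name__ == "__main__":
    # Illustrative check only: DPCNN needs a fixed integer sequence length, so this block just
    # prints the default hyper-parameters; building the full model would additionally require an
    # embedding constructed with an explicit `sequence_length`.
    print(DPCNN_Model.get_default_hyper_parameters())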
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/classification/dpcnn_model.py | dpcnn_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 11:36 上午
from typing import Callable
from typing import Dict, Any
import numpy as np
from sklearn import metrics
from pysoftNLP.kashgari.tasks.base_model import BaseModel
class BaseScoringModel(BaseModel):
    """Base Scoring Model"""
__task__ = 'scoring'
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = 'mse'
if kwargs.get('optimizer') is None:
kwargs['optimizer'] = 'rmsprop'
if kwargs.get('metrics') is None:
kwargs['metrics'] = ['mae']
super(BaseScoringModel, self).compile_model(**kwargs)
def evaluate(self,
x_data,
y_data,
batch_size=None,
should_round: bool = False,
round_func: Callable = None,
digits=4,
debug_info=False) -> Dict:
"""
        Build a report of the main evaluation metrics: regression metrics by default, or
        classification metrics when predictions are rounded to discrete labels.
Args:
x_data:
y_data:
batch_size:
should_round:
round_func:
digits:
debug_info:
Returns:
"""
y_pred = self.predict(x_data, batch_size=batch_size)
if should_round:
if round_func is None:
round_func = np.round
            if self.processor.output_dim != 1:
                raise ValueError('Evaluating with a round function only accepts 1-dimensional output')
y_pred = [round_func(i) for i in y_pred]
report = metrics.classification_report(y_data,
y_pred,
digits=digits)
report_dic = metrics.classification_report(y_data,
y_pred,
output_dict=True,
digits=digits)
print(report)
else:
mean_squared_error = metrics.mean_squared_error(y_data, y_pred)
r2_score = metrics.r2_score(y_data, y_pred)
report_dic = {
'mean_squared_error': mean_squared_error,
'r2_score': r2_score
}
print(f"mean_squared_error : {mean_squared_error}\n"
f"r2_score : {r2_score}")
return report_dic
if __name__ == "__main__":
pass
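    # Hedged, self-contained sketch of the rounded-evaluation path implemented above: continuous
    # predictions are rounded to discrete labels and scored with a classification report. The
    # arrays are synthetic and only illustrate the mechanics.
    y_true = np.array([0, 1, 1, 0, 1])
    y_cont = np.array([0.2, 0.8, 0.6, 0.4, 0.9])
    y_round = np.round(y_cont).astype(int)
    print(metrics.classification_report(y_true, y_round, digits=4))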
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/scoring/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 11:36 上午
from pysoftNLP.kashgari.tasks.scoring.models import BiLSTM_Model
if __name__ == "__main__":
pass
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/scoring/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 11:38 上午
import logging
from typing import Dict, Any
from tensorflow import keras
from pysoftNLP.kashgari.tasks.scoring.base_model import BaseScoringModel
from pysoftNLP.kashgari.layers import L
class BiLSTM_Model(BaseScoringModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'linear'
}
}
def build_model_arc(self):
output_dim = self.processor.output_dim
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_lstm(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
if __name__ == "__main__":
from kashgari.corpus import SMP2018ECDTCorpus
import numpy as np
x, y = SMP2018ECDTCorpus.load_data('valid')
y = np.random.random((len(x), 4))
model = BiLSTM_Model()
model.fit(x, y)
print(model.predict(x[:10]))
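    # Hedged follow-up: with a 4-dimensional target, `evaluate` falls back to regression metrics
    # (mean squared error and R2); rounding-based evaluation would require 1-dimensional output.
    print(model.evaluate(x, y))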
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/tasks/scoring/models.py | models.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-20 17:40
import json
import logging
import pydoc
from typing import Union, List, Optional, Dict
import numpy as np
from tensorflow import keras
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari.processors import ClassificationProcessor, LabelingProcessor, ScoringProcessor
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
L = keras.layers
class Embedding(object):
"""Base class for Embedding Model"""
def info(self) -> Dict:
return {
'processor': self.processor.info(),
'class_name': self.__class__.__name__,
'module': self.__class__.__module__,
'config': {
'sequence_length': self.sequence_length,
'embedding_size': self.embedding_size,
'task': self.task
},
'embed_model': json.loads(self.embed_model.to_json()),
}
@classmethod
def _load_saved_instance(cls,
config_dict: Dict,
model_path: str,
tf_model: keras.Model):
processor_info = config_dict['processor']
processor_class = pydoc.locate(f"{processor_info['module']}.{processor_info['class_name']}")
processor = processor_class(**processor_info['config'])
instance = cls(processor=processor,
from_saved_model=True, **config_dict['config'])
embed_model_json_str = json.dumps(config_dict['embed_model'])
instance.embed_model = keras.models.model_from_json(embed_model_json_str,
custom_objects=kashgari.custom_objects)
# Load Weights from model
for layer in instance.embed_model.layers:
layer.set_weights(tf_model.get_layer(layer.name).get_weights())
return instance
def __init__(self,
task: str = None,
sequence_length: Union[int, str] = 'auto',
embedding_size: int = 100,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
self.task = task
self.embedding_size = embedding_size
if processor is None:
if task == kashgari.CLASSIFICATION:
self.processor = ClassificationProcessor()
elif task == kashgari.LABELING:
self.processor = LabelingProcessor()
elif task == kashgari.SCORING:
self.processor = ScoringProcessor()
else:
                raise ValueError('`task` must be one of {labeling, classification, scoring}, '
                                 'or a custom `processor` must be provided')
else:
self.processor = processor
self.sequence_length: Union[int, str] = sequence_length
self.embed_model: Optional[keras.Model] = None
self._tokenizer = None
@property
def token_count(self) -> int:
"""
corpus token count
"""
return len(self.processor.token2idx)
@property
def sequence_length(self) -> Union[int, str]:
"""
model sequence length
"""
return self.processor.sequence_length
@property
def label2idx(self) -> Dict[str, int]:
"""
label to index dict
"""
return self.processor.label2idx
@property
def token2idx(self) -> Dict[str, int]:
"""
token to index dict
"""
return self.processor.token2idx
@property
def tokenizer(self):
if self._tokenizer:
return self._tokenizer
else:
raise ValueError('This embedding not support built-in tokenizer')
@sequence_length.setter
def sequence_length(self, val: Union[int, str]):
if isinstance(val, str):
if val == 'auto':
                logging.warning("Sequence length will be set automatically to the 95th percentile "
                                "of the corpus sequence lengths")
elif val == 'variable':
val = None
else:
raise ValueError("sequence_length must be an int or 'auto' or 'variable'")
self.processor.sequence_length = val
def _build_model(self, **kwargs):
raise NotImplementedError
def analyze_corpus(self,
x: List[List[str]],
y: Union[List[List[str]], List[str]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
self.processor.analyze_corpus(x, y)
if self.sequence_length == 'auto':
self.sequence_length = self.processor.dataset_info['RECOMMEND_LEN']
self._build_model()
def embed_one(self, sentence: Union[List[str], List[int]]) -> np.array:
"""
Convert one sentence to vector
Args:
sentence: target sentence, list of str
Returns:
vectorized sentence
"""
return self.embed([sentence])[0]
def embed(self,
sentence_list: Union[List[List[str]], List[List[int]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug info
Returns:
vectorized sentence list
"""
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
return self.processor.process_x_dataset(data, self.sequence_length, subset)
def process_y_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
"""
batch process labels data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
return self.processor.process_y_dataset(data, self.sequence_length, subset)
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
return self.processor.reverse_numerize_label_sequences(sequences, lengths=lengths)
def __repr__(self):
return f"<{self.__class__} seq_len: {self.sequence_length}>"
def __str__(self):
return self.__repr__()
if __name__ == "__main__":
print("Hello world")
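    # Hedged end-to-end sketch of the workflow defined above (analyze_corpus -> embed), using the
    # trainable BareEmbedding subclass; the two-sentence corpus is made up purely for illustration.
    from pysoftNLP.kashgari.embeddings.bare_embedding import BareEmbedding
    toy_x = [['this', 'is', 'fine'], ['all', 'good', 'here']]
    toy_y = ['a', 'b']
    toy_embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=5)
    toy_embed.analyze_corpus(toy_x, toy_y)
    print(toy_embed.embed(toy_x).shape)  # expected: (2, 5, 100)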
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/base_embedding.py | base_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-26 17:40
import os
os.environ['TF_KERAS'] = '1'
import logging
from typing import Union, Optional, Any, List, Tuple
import numpy as np
import pysoftNLP.kashgari as kashgari
import pathlib
from tensorflow.python.keras.utils import get_file
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
import keras_gpt_2 as gpt2
class GPT2Embedding(Embedding):
    """Pre-trained GPT-2 embedding"""
def info(self):
info = super(GPT2Embedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
model_folder: str,
task: str = None,
sequence_length: Union[Tuple[int, ...], str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
model_folder:
sequence_length:
processor:
from_saved_model:
"""
super(GPT2Embedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
if isinstance(sequence_length, tuple):
if len(sequence_length) > 2:
                raise ValueError('GPT-2 embedding only accepts a `sequence_length` tuple with at most 2 elements')
else:
if not all([s == sequence_length[0] for s in sequence_length]):
                    raise ValueError('GPT-2 embedding requires all elements of the `sequence_length` tuple to be equal')
if sequence_length == 'variable':
self.sequence_length = None
self.processor.token_pad = 'pad'
self.processor.token_unk = 'unk'
self.processor.token_bos = 'pad'
self.processor.token_eos = 'pad'
self.model_folder = model_folder
if not from_saved_model:
self._build_token2idx_from_gpt()
self._build_model()
def _build_token2idx_from_gpt(self):
encoder_path = os.path.join(self.model_folder, 'encoder.json')
vocab_path = os.path.join(self.model_folder, 'vocab.bpe')
bpe: gpt2.BytePairEncoding = gpt2.get_bpe_from_files(encoder_path, vocab_path)
token2idx = bpe.token_dict.copy()
self.processor.token2idx = token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None and self.sequence_length != 'auto':
config_path = os.path.join(self.model_folder, 'hparams.json')
checkpoint_path = os.path.join(self.model_folder, 'model.ckpt')
model = gpt2.load_trained_model_from_checkpoint(config_path,
checkpoint_path,
self.sequence_length)
if not kashgari.config.disable_auto_summary:
model.summary()
self.embed_model = model
# if self.token_count == 0:
# logging.debug('need to build after build_word2idx')
# elif self.embed_model is None:
# seq_len = self.sequence_length
# if isinstance(seq_len, tuple):
# seq_len = seq_len[0]
# if isinstance(seq_len, str):
# return
# config_path = os.path.join(self.bert_path, 'bert_config.json')
# check_point_path = os.path.join(self.bert_path, 'bert_model.ckpt')
# bert_model = keras_bert.load_trained_model_from_checkpoint(config_path,
# check_point_path,
# seq_len=seq_len)
#
# self._model = tf.keras.Model(bert_model.inputs, bert_model.output)
# bert_seq_len = int(bert_model.output.shape[1])
# if bert_seq_len < seq_len:
# logging.warning(f"Sequence length limit set to {bert_seq_len} by pre-trained model")
# self.sequence_length = bert_seq_len
# self.embedding_size = int(bert_model.output.shape[-1])
# num_layers = len(bert_model.layers)
# bert_model.summary()
# target_layer_idx = [num_layers - 1 + idx * 8 for idx in range(-3, 1)]
# features_layers = [bert_model.get_layer(index=idx).output for idx in target_layer_idx]
# embedding_layer = L.concatenate(features_layers)
# output_features = NonMaskingLayer()(embedding_layer)
#
# self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
# logging.warning(f'seq_len: {self.sequence_length}')
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if len(self.processor.token2idx) == 0:
self._build_token2idx_from_gpt()
super(GPT2Embedding, self).analyze_corpus(x, y)
def embed(self,
sentence_list: Union[Tuple[List[List[str]], ...], List[List[str]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug log
Returns:
vectorized sentence list
"""
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: Union[Tuple[List[List[str]], ...], List[List[str]]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
x1 = None
if isinstance(data, tuple):
if len(data) == 2:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
x1 = self.processor.process_x_dataset(data[1], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data, self.sequence_length, subset)
if x1 is None:
x1 = np.zeros(x0.shape, dtype=np.int32)
return x0, x1
@classmethod
def load_data(cls, model_name):
"""
Download pretrained GPT-2 models
Args:
model_name: {117M, 345M}
Returns:
GPT-2 model folder
"""
model_folder: pathlib.Path = pathlib.Path(os.path.join(kashgari.macros.DATA_PATH,
'datasets',
f'gpt2-{model_name}'))
model_folder.mkdir(exist_ok=True, parents=True)
for filename in ['checkpoint', 'encoder.json', 'hparams.json', 'model.ckpt.data-00000-of-00001',
'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
url = "https://storage.googleapis.com/gpt-2/models/" + model_name + "/" + filename
get_file(os.path.join(f'gpt2-{model_name}', filename),
url,
cache_dir=kashgari.macros.DATA_PATH)
return str(model_folder)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
# bert_model_path = os.path.join(utils.get_project_path(), 'tests/test-data/bert')
model_folder = GPT2Embedding.load_data('117M')
print(model_folder)
b = GPT2Embedding(task=kashgari.CLASSIFICATION,
model_folder=model_folder,
sequence_length=12)
# from kashgari.corpus import SMP2018ECDTCorpus
# test_x, test_y = SMP2018ECDTCorpus.load_data('valid')
# b.analyze_corpus(test_x, test_y)
data1 = 'all work and no play makes'.split(' ')
r = b.embed([data1], True)
print(r)
print(r.shape)
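    # Hedged illustration: `process_x_dataset` returns a (token_ids, extra_input) pair where the
    # second array is zero-filled, mirroring the two-input convention used for the embed model.
    tokens, extra = b.process_x_dataset([data1])
    print(tokens.shape, extra.shape)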
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/gpt_2_embedding.py | gpt_2_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bert_embedding_v2.py
# time: 10:03 上午
import os
os.environ['TF_KERAS'] = '1'
import json
import codecs
import logging
from typing import Union, Optional
from bert4keras.models import build_transformer_model
import pysoftNLP.kashgari as kashgari
import tensorflow as tf
from pysoftNLP.kashgari.embeddings.bert_embedding import BERTEmbedding
from pysoftNLP.kashgari.layers import NonMaskingLayer
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
import keras_bert
class BERTEmbeddingV2(BERTEmbedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(BERTEmbedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
vacab_path: str,
config_path: str,
checkpoint_path: str,
bert_type: str = 'bert',
task: str = None,
sequence_length: Union[str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
"""
self.model_folder = ''
self.vacab_path = vacab_path
self.config_path = config_path
self.checkpoint_path = checkpoint_path
super(BERTEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
self.bert_type = bert_type
self.processor.token_pad = '[PAD]'
self.processor.token_unk = '[UNK]'
self.processor.token_bos = '[CLS]'
self.processor.token_eos = '[SEP]'
self.processor.add_bos_eos = True
if not from_saved_model:
self._build_token2idx_from_bert()
self._build_model()
def _build_token2idx_from_bert(self):
token2idx = {}
with codecs.open(self.vacab_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
self.bert_token2idx = token2idx
self._tokenizer = keras_bert.Tokenizer(token2idx)
self.processor.token2idx = self.bert_token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None:
seq_len = self.sequence_length
if isinstance(seq_len, tuple):
seq_len = seq_len[0]
if isinstance(seq_len, str):
logging.warning(f"Model will be built when sequence length is determined")
return
config_path = self.config_path
config = json.load(open(config_path))
if seq_len > config.get('max_position_embeddings'):
seq_len = config.get('max_position_embeddings')
logging.warning(f"Max seq length is {seq_len}")
bert_model = build_transformer_model(config_path=self.config_path,
checkpoint_path=self.checkpoint_path,
model=self.bert_type,
application='encoder',
return_keras_model=True)
self.embed_model = bert_model
self.embedding_size = int(bert_model.output.shape[-1])
output_features = NonMaskingLayer()(bert_model.output)
self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
if __name__ == "__main__":
# BERT_PATH = '/Users/brikerman/Desktop/nlp/language_models/bert/chinese_L-12_H-768_A-12'
model_folder = '/Users/brikerman/Desktop/nlp/language_models/albert_base'
checkpoint_path = os.path.join(model_folder, 'model.ckpt-best')
config_path = os.path.join(model_folder, 'albert_config.json')
vacab_path = os.path.join(model_folder, 'vocab_chinese.txt')
embed = BERTEmbeddingV2(vacab_path, config_path, checkpoint_path,
bert_type='albert',
task=kashgari.CLASSIFICATION,
sequence_length=100)
x = embed.embed_one(list('今天天气不错'))
print(x)
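    # Hedged follow-up: `embed_one` returns one vector per token position, so the shape is
    # expected to be (sequence_length, hidden_size) for the loaded checkpoint.
    print(x.shape)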
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/bert_embedding_v2.py | bert_embedding_v2.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: stacked_embedding.py
# time: 2019-05-23 09:18
import json
import pydoc
from typing import Union, Optional, Tuple, List, Dict
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.layers import L
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
class StackedEmbedding(Embedding):
    """Stack multiple embedding layers into a single, concatenated embedding"""
@classmethod
def _load_saved_instance(cls,
config_dict: Dict,
model_path: str,
tf_model: keras.Model):
embeddings = []
for embed_info in config_dict['embeddings']:
embed_class = pydoc.locate(f"{embed_info['module']}.{embed_info['class_name']}")
embedding: Embedding = embed_class._load_saved_instance(embed_info,
model_path,
tf_model)
embeddings.append(embedding)
instance = cls(embeddings=embeddings,
from_saved_model=True)
embed_model_json_str = json.dumps(config_dict['embed_model'])
instance.embed_model = keras.models.model_from_json(embed_model_json_str,
custom_objects=kashgari.custom_objects)
# Load Weights from model
for layer in instance.embed_model.layers:
layer.set_weights(tf_model.get_layer(layer.name).get_weights())
return instance
def info(self):
info = super(StackedEmbedding, self).info()
info['embeddings'] = [embed.info() for embed in self.embeddings]
info['config'] = {}
return info
def __init__(self,
embeddings: List[Embedding],
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
embeddings:
processor:
"""
task = kashgari.CLASSIFICATION
if all(isinstance(embed.sequence_length, int) for embed in embeddings):
sequence_length = [embed.sequence_length for embed in embeddings]
else:
raise ValueError('Need to set sequence length for all embeddings while using stacked embedding')
super(StackedEmbedding, self).__init__(task=task,
sequence_length=sequence_length[0],
embedding_size=100,
processor=processor,
from_saved_model=from_saved_model)
self.embeddings = embeddings
self.processor = embeddings[0].processor
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
if self.embed_model is None and all(embed.embed_model is not None for embed in self.embeddings):
layer_concatenate = L.Concatenate(name='layer_concatenate')
inputs = []
for embed in self.embeddings:
inputs += embed.embed_model.inputs
# inputs = [embed.embed_model.inputs for embed in self.embeddings]
outputs = layer_concatenate([embed.embed_model.output for embed in self.embeddings])
self.embed_model = tf.keras.Model(inputs, outputs)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[str]], List[str]]):
for index in range(len(x)):
self.embeddings[index].analyze_corpus(x[index], y)
self._build_model()
def process_x_dataset(self,
data: Tuple[List[List[str]], ...],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
result = []
for index, dataset in enumerate(data):
x = self.embeddings[index].process_x_dataset(dataset, subset)
if isinstance(x, tuple):
result += list(x)
else:
result.append(x)
return tuple(result)
def process_y_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
return self.embeddings[0].process_y_dataset(data, subset)
if __name__ == "__main__":
pass
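    # Hedged sketch of stacking a token embedding with a numeric-feature embedding; the feature
    # name, lengths and toy data below are invented for illustration only.
    from pysoftNLP.kashgari.embeddings.bare_embedding import BareEmbedding
    from pysoftNLP.kashgari.embeddings.numeric_feature_embedding import NumericFeaturesEmbedding
    text_embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=5)
    pos_embed = NumericFeaturesEmbedding(feature_count=10, feature_name='pos', sequence_length=5)
    stacked = StackedEmbedding([text_embed, pos_embed])
    x_text = [['a', 'b', 'c'], ['d', 'e', 'f']]
    x_pos = [[1, 2, 3], [4, 5, 6]]
    stacked.analyze_corpus((x_text, x_pos), ['x', 'y'])
    print([tuple(inp.shape) for inp in stacked.embed_model.inputs])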
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/stacked_embedding.py | stacked_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-25 17:40
import os
os.environ['TF_KERAS'] = '1'
import codecs
import logging
from typing import Union, Optional, Any, List, Tuple
import numpy as np
import pysoftNLP.kashgari as kashgari
import tensorflow as tf
from pysoftNLP.kashgari.layers import NonMaskingLayer
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
import keras_bert
class BERTEmbedding(Embedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(BERTEmbedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
model_folder: str,
layer_nums: int = 4,
trainable: bool = False,
task: str = None,
sequence_length: Union[str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
model_folder:
            layer_nums: number of layers whose outputs will be concatenated into a single tensor,
                default `4`, which outputs the last 4 hidden layers as the BERT paper suggests
            trainable: whether the pre-trained layers are trainable, default `False`; set it to `True`
                to fine-tune this embedding layer during your training
sequence_length:
processor:
from_saved_model:
"""
self.trainable = trainable
# Do not need to train the whole bert model if just to use its feature output
self.training = False
self.layer_nums = layer_nums
if isinstance(sequence_length, tuple):
            raise ValueError('BERT embedding only accepts an `int` type `sequence_length`')
if sequence_length == 'variable':
            raise ValueError('BERT embedding only accepts sequences of equal length')
super(BERTEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
self.processor.token_pad = '[PAD]'
self.processor.token_unk = '[UNK]'
self.processor.token_bos = '[CLS]'
self.processor.token_eos = '[SEP]'
self.processor.add_bos_eos = True
self.model_folder = model_folder
if not from_saved_model:
self._build_token2idx_from_bert()
self._build_model()
def _build_token2idx_from_bert(self):
dict_path = os.path.join(self.model_folder, 'vocab.txt')
token2idx = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
self.bert_token2idx = token2idx
self._tokenizer = keras_bert.Tokenizer(token2idx)
self.processor.token2idx = self.bert_token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None:
seq_len = self.sequence_length
if isinstance(seq_len, tuple):
seq_len = seq_len[0]
if isinstance(seq_len, str):
                logging.warning("Model will be built once the sequence length is determined")
return
config_path = os.path.join(self.model_folder, 'bert_config.json')
check_point_path = os.path.join(self.model_folder, 'bert_model.ckpt')
bert_model = keras_bert.load_trained_model_from_checkpoint(config_path,
check_point_path,
seq_len=seq_len,
output_layer_num=self.layer_nums,
training=self.training,
trainable=self.trainable)
self._model = tf.keras.Model(bert_model.inputs, bert_model.output)
bert_seq_len = int(bert_model.output.shape[1])
if bert_seq_len < seq_len:
logging.warning(f"Sequence length limit set to {bert_seq_len} by pre-trained model")
self.sequence_length = bert_seq_len
self.embedding_size = int(bert_model.output.shape[-1])
output_features = NonMaskingLayer()(bert_model.output)
self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
logging.warning(f'seq_len: {self.sequence_length}')
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if len(self.processor.token2idx) == 0:
self._build_token2idx_from_bert()
super(BERTEmbedding, self).analyze_corpus(x, y)
def embed(self,
sentence_list: Union[Tuple[List[List[str]], ...], List[List[str]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug log
Returns:
vectorized sentence list
"""
if self.embed_model is None:
raise ValueError('need to build model for embed sentence')
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: Union[Tuple[List[List[str]], ...], List[List[str]]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
x1 = None
if isinstance(data, tuple):
if len(data) == 2:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
x1 = self.processor.process_x_dataset(data[1], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data, self.sequence_length, subset)
if x1 is None:
x1 = np.zeros(x0.shape, dtype=np.int32)
return x0, x1
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
# bert_model_path = os.path.join(utils.get_project_path(), 'tests/test-data/bert')
b = BERTEmbedding(task=kashgari.CLASSIFICATION,
model_folder='/Users/brikerman/.kashgari/embedding/bert/chinese_L-12_H-768_A-12',
sequence_length=12)
from kashgari.corpus import SMP2018ECDTCorpus
test_x, test_y = SMP2018ECDTCorpus.load_data('valid')
b.analyze_corpus(test_x, test_y)
data1 = 'all work and no play makes'.split(' ')
data2 = '你 好 啊'.split(' ')
r = b.embed([data1], True)
tokens = b.process_x_dataset([['语', '言', '模', '型']])[0]
target_index = [101, 6427, 6241, 3563, 1798, 102]
target_index = target_index + [0] * (12 - len(target_index))
assert list(tokens[0]) == list(target_index)
print(tokens)
print(r)
print(r.shape)
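    # Hedged illustration of the built-in tokenizer exposed by this embedding (keras_bert's
    # Tokenizer); the exact sub-token split depends on the loaded vocabulary.
    print(b.tokenizer.tokenize('你好'))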
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/bert_embedding.py | bert_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: w2v_embedding.py
# time: 2019-05-20 17:32
import logging
from typing import Union, Optional, Dict, Any, List, Tuple
import numpy as np
from gensim.models import KeyedVectors
from tensorflow import keras
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
L = keras.layers
class WordEmbedding(Embedding):
"""Pre-trained word2vec embedding"""
def info(self):
info = super(WordEmbedding, self).info()
info['config'] = {
'w2v_path': self.w2v_path,
'w2v_kwargs': self.w2v_kwargs,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
w2v_path: str,
task: str = None,
w2v_kwargs: Dict[str, Any] = None,
sequence_length: Union[Tuple[int, ...], str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
w2v_path: word2vec file path
            w2v_kwargs: params passed to the ``load_word2vec_format()`` function of ``gensim.models.KeyedVectors`` -
https://radimrehurek.com/gensim/models/keyedvectors.html#module-gensim.models.keyedvectors
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the 95th percentile of
                the corpus sequence lengths is used. With ``'variable'``, the model input shape is set to None,
                so inputs of various lengths are handled and each batch is padded to its longest sequence.
                With an integer, say ``50``, the input and output sequence length is fixed to 50.
processor:
"""
if w2v_kwargs is None:
w2v_kwargs = {}
self.w2v_path = w2v_path
self.w2v_kwargs = w2v_kwargs
self.w2v_model_loaded = False
super(WordEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
if not from_saved_model:
self._build_token2idx_from_w2v()
if self.sequence_length != 'auto':
self._build_model()
def _build_token2idx_from_w2v(self):
w2v = KeyedVectors.load_word2vec_format(self.w2v_path, **self.w2v_kwargs)
token2idx = {
self.processor.token_pad: 0,
self.processor.token_unk: 1,
self.processor.token_bos: 2,
self.processor.token_eos: 3
}
for token in w2v.index2word:
token2idx[token] = len(token2idx)
vector_matrix = np.zeros((len(token2idx), w2v.vector_size))
vector_matrix[1] = np.random.rand(w2v.vector_size)
vector_matrix[4:] = w2v.vectors
self.embedding_size = w2v.vector_size
self.w2v_vector_matrix = vector_matrix
self.w2v_token2idx = token2idx
self.w2v_top_words = w2v.index2entity[:50]
self.w2v_model_loaded = True
self.processor.token2idx = self.w2v_token2idx
self.processor.idx2token = dict([(value, key) for key, value in self.w2v_token2idx.items()])
logging.debug('------------------------------------------------')
logging.debug('Loaded gensim word2vec model')
logging.debug('model : {}'.format(self.w2v_path))
logging.debug('word count : {}'.format(len(self.w2v_vector_matrix)))
logging.debug('Top 50 word : {}'.format(self.w2v_top_words))
logging.debug('------------------------------------------------')
def _build_model(self, **kwargs):
if self.token_count == 0:
logging.debug('need to build after build_word2idx')
else:
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input')
layer_embedding = L.Embedding(self.token_count,
self.embedding_size,
weights=[self.w2v_vector_matrix],
trainable=False,
name=f'layer_embedding')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if not self.w2v_model_loaded:
self._build_token2idx_from_w2v()
super(WordEmbedding, self).analyze_corpus(x, y)
if __name__ == "__main__":
print('hello world')
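    # Hedged construction sketch: the path below is a placeholder, and `binary`/`limit` are
    # standard `load_word2vec_format()` options forwarded through `w2v_kwargs`. Guarded so the
    # demo only runs when such a file actually exists.
    import os
    w2v_file = '/path/to/vectors.bin'  # placeholder, replace with a real word2vec file
    if os.path.exists(w2v_file):
        w2v_embed = WordEmbedding(w2v_path=w2v_file,
                                  task='classification',
                                  w2v_kwargs={'binary': True, 'limit': 10000},
                                  sequence_length=50)
        print(w2v_embed.embedding_size)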
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/word_embedding.py | word_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bare_embedding.py
# time: 2019-05-20 10:36
import logging
from typing import Union, Optional
from tensorflow import keras
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
L = keras.layers
# Todo: A better name for this class
class BareEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
def __init__(self,
task: str = None,
sequence_length: Union[int, str] = 'auto',
embedding_size: int = 100,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Init bare embedding (embedding without pre-training)
Args:
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the 95th percentile of
                the corpus sequence lengths is used. With ``'variable'``, the model input shape is set to None,
                so inputs of various lengths are handled and each batch is padded to its longest sequence.
                With an integer, say ``50``, the input and output sequence length is fixed to 50.
embedding_size: Dimension of the dense embedding.
"""
super(BareEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=embedding_size,
processor=processor,
from_saved_model=from_saved_model)
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
if self.sequence_length == 0 or \
self.sequence_length == 'auto' or \
self.token_count == 0:
logging.debug('need to build after build_word2idx')
else:
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input')
layer_embedding = L.Embedding(self.token_count,
self.embedding_size,
name=f'layer_embedding')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
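def _bare_embedding_demo():
    # A minimal usage sketch with a made-up two-sentence toy corpus; the
    # embedding weights start random and are trained together with the
    # downstream model. Sizes below are illustrative only.
    import pysoftNLP.kashgari as kashgari
    embedding = BareEmbedding(task=kashgari.CLASSIFICATION,
                              sequence_length=10,
                              embedding_size=8)
    embedding.analyze_corpus([['hello', 'world'], ['hello', 'kashgari']],
                             ['greeting', 'greeting'])
    print(embedding.embed_one(['hello', 'world']).shape)  # expected: (10, 8)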
if __name__ == "__main__":
print('hello world')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/bare_embedding.py | bare_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: numeric_feature_embedding.py
# time: 2019-05-23 09:04
from typing import Union, Optional, Tuple, List
import numpy as np
from tensorflow import keras
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari.embeddings.base_embedding import Embedding
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
L = keras.layers
# Todo: A better name for this class
class NumericFeaturesEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
def info(self):
info = super(NumericFeaturesEmbedding, self).info()
info['config'] = {
'feature_count': self.feature_count,
'feature_name': self.feature_name,
'sequence_length': self.sequence_length,
'embedding_size': self.embedding_size
}
return info
def __init__(self,
feature_count: int,
feature_name: str,
sequence_length: Union[str, int] = 'auto',
embedding_size: int = None,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Init bare embedding (embedding without pre-training)
Args:
sequence_length: ``'auto'``, ``'variable'`` or an integer. When using ``'auto'``, 95% of the corpus length
is used as the sequence length. When using ``'variable'``, the model input shape is set to None, so it can
handle inputs of various lengths; the length of the longest sequence in each batch is then used as the
sequence length. If an integer such as ``50`` is given, the input and output sequence length is set to 50.
embedding_size: Dimension of the dense embedding.
"""
# Dummy Type
task = kashgari.CLASSIFICATION
if embedding_size is None:
embedding_size = feature_count * 8
super(NumericFeaturesEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=embedding_size,
processor=processor,
from_saved_model=from_saved_model)
self.feature_count = feature_count
self.feature_name = feature_name
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input_{self.feature_name}')
layer_embedding = L.Embedding(self.feature_count + 1,
self.embedding_size,
name=f'layer_embedding_{self.feature_name}')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[str]], List[str]]):
pass
def process_x_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
if subset is not None:
numerized_samples = kashgari.utils.get_list_subset(data, subset)
else:
numerized_samples = data
return pad_sequences(numerized_samples, self.sequence_length, padding='post', truncating='post')
if __name__ == "__main__":
e = NumericFeaturesEmbedding(2, feature_name='is_bold', sequence_length=10)
e.embed_model.summary()
print(e.embed_one([1, 2]))
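    # A small additional sketch: raw feature sequences are padded / truncated to
    # the fixed sequence length before they reach the embed model (toy values).
    print(e.process_x_dataset([[1, 0, 1], [1, 1, 0, 0, 1]]).shape)  # expected: (2, 10)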
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/numeric_feature_embedding.py | numeric_feature_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py.py
# time: 2019-05-20 11:21
from pysoftNLP.kashgari.embeddings.bare_embedding import BareEmbedding
from pysoftNLP.kashgari.embeddings.bert_embedding import BERTEmbedding
from pysoftNLP.kashgari.embeddings.word_embedding import WordEmbedding
from pysoftNLP.kashgari.embeddings.numeric_feature_embedding import NumericFeaturesEmbedding
from pysoftNLP.kashgari.embeddings.stacked_embedding import StackedEmbedding
from pysoftNLP.kashgari.embeddings.gpt_2_embedding import GPT2Embedding
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/embeddings/__init__.py | __init__.py |
# encoding: utf-8
# author: AlexWang
# contact: ialexwwang@gmail.com
# file: attention_weighted_average.py
# time: 2019-06-25 16:35
import pysoftNLP.kashgari as kashgari
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
L = keras.layers
InputSpec = L.InputSpec
class KMaxPoolingLayer(L.Layer):
'''
K-max pooling layer that extracts the k highest activations from a sequence (2nd dimension).
TensorFlow backend.
# Arguments
k: An int scale,
indicate k max steps of features to pool.
sorted: A bool,
if output is sorted (default) or not.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
# Input shape
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
# Output shape
3D tensor with shape:
`(batch_size, top-k-steps, features)`
'''
def __init__(self, k=1, sorted=True, data_format='channels_last', **kwargs): # noqa: A002
super(KMaxPoolingLayer, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.k = k
self.sorted = sorted
if data_format.lower() in ['channels_first', 'channels_last']:
self.data_format = data_format.lower()
else:
self.data_format = K.image_data_format()
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
return (input_shape[0], self.k, input_shape[1])
else:
return (input_shape[0], self.k, input_shape[2])
def call(self, inputs):
if self.data_format == 'channels_last':
# swap last two dimensions since top_k will be applied along the last dimension
shifted_input = tf.transpose(inputs, [0, 2, 1])
# extract top_k, returns two tensors [values, indices]
top_k = tf.nn.top_k(shifted_input, k=self.k, sorted=self.sorted)[0]
else:
top_k = tf.nn.top_k(inputs, k=self.k, sorted=self.sorted)[0]
# return flattened output
return tf.transpose(top_k, [0, 2, 1])
def get_config(self):
config = {'k': self.k,
'sorted': self.sorted,
'data_format': self.data_format}
base_config = super(KMaxPoolingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
KMaxPooling = KMaxPoolingLayer
KMaxPoolLayer = KMaxPoolingLayer
kashgari.custom_objects['KMaxPoolingLayer'] = KMaxPoolingLayer
kashgari.custom_objects['KMaxPooling'] = KMaxPooling
kashgari.custom_objects['KMaxPoolLayer'] = KMaxPoolLayer
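def _kmax_pooling_demo():
    # A minimal sketch (shapes are illustrative, not from the original file):
    # keep the 3 highest activations along the time axis, so a
    # (batch, 50, 128) input becomes (batch, 3, 128).
    inputs = L.Input(shape=(50, 128))
    pooled = KMaxPoolingLayer(k=3)(inputs)
    model = keras.Model(inputs, pooled)
    model.summary()
    return model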
if __name__ == '__main__':
print('Hello world, KMaxPoolLayer/KMaxPoolingLayer.')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/layers/kmax_pool_layer.py | kmax_pool_layer.py |
# encoding: utf-8
# author: AlexWang
# contact: ialexwwang@gmail.com
# file: attention_weighted_average.py
# time: 2019-06-24 19:35
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
import pysoftNLP.kashgari as kashgari
L = keras.layers
initializers = keras.initializers
InputSpec = L.InputSpec
class AttentionWeightedAverageLayer(L.Layer):
'''
Computes a weighted average of the different channels across timesteps.
Uses one parameter per channel to compute the attention value for a single timestep.
'''
def __init__(self, return_attention=False, **kwargs):
self.init = initializers.get('uniform')
self.supports_masking = True
self.return_attention = return_attention
super(AttentionWeightedAverageLayer, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(ndim=3)]
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[2].value, 1),
name='{}_w'.format(self.name),
initializer=self.init,
trainable=True
)
# self.trainable_weights = [self.W]
super(AttentionWeightedAverageLayer, self).build(input_shape)
def call(self, x, mask=None):
# computes a probability distribution over the timesteps
# uses 'max trick' for numerical stability
# reshape is done to avoid issue with Tensorflow
# and 1-dimensional weights
logits = K.dot(x, self.W)
x_shape = K.shape(x)
logits = K.reshape(logits, (x_shape[0], x_shape[1]))
ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
# masked timesteps have zero weight
if mask is not None:
mask = K.cast(mask, K.floatx())
ai = ai * mask
att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
weighted_input = x * K.expand_dims(att_weights)
result = K.sum(weighted_input, axis=1)
if self.return_attention:
return [result, att_weights]
return result
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
output_len = input_shape[2]
if self.return_attention:
return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
return (input_shape[0], output_len)
def compute_mask(self, inputs, input_mask=None):
if isinstance(input_mask, list):
return [None] * len(input_mask)
else:
return None
def get_config(self):
config = {'return_attention': self.return_attention, }
base_config = super(AttentionWeightedAverageLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
AttentionWeightedAverage = AttentionWeightedAverageLayer
AttWgtAvgLayer = AttentionWeightedAverageLayer
kashgari.custom_objects['AttentionWeightedAverageLayer'] = AttentionWeightedAverageLayer
kashgari.custom_objects['AttentionWeightedAverage'] = AttentionWeightedAverage
kashgari.custom_objects['AttWgtAvgLayer'] = AttWgtAvgLayer
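def _attention_average_demo():
    # A minimal sketch (shapes are illustrative, not from the original file):
    # learned per-channel attention collapses a (batch, 50, 128) tensor into a
    # single (batch, 128) vector.
    inputs = L.Input(shape=(50, 128))
    averaged = AttentionWeightedAverageLayer()(inputs)
    model = keras.Model(inputs, averaged)
    model.summary()
    return model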
if __name__ == '__main__':
print('Hello world, AttentionWeightedAverageLayer/AttWgtAvgLayer.')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/layers/att_wgt_avg_layer.py | att_wgt_avg_layer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: crf.py
# time: 2019-06-28 14:33
import tensorflow as tf
class CRF(tf.keras.layers.Layer):
"""
Conditional Random Field layer (tf.keras)
`CRF` can be used as the last layer in a network (as a classifier). Input shape (features)
must be equal to the number of classes the CRF can predict (a linear layer is recommended).
Note: networks using `CRF` must use the loss and accuracy functions provided by this layer
(`loss` and `viterbi_accuracy`), because decoding the label sequence depends on the
layer's internal transition weights.
Args:
output_dim (int): the number of labels to tag each temporal input.
Input shape:
3D tensor with shape `(batch_size, sentence_length, num_classes)`.
Output shape:
3D tensor with shape `(batch_size, sentence_length, num_classes)`.
"""
def __init__(self,
output_dim,
mode='reg',
supports_masking=False,
transitions=None,
**kwargs):
self.transitions = None
super(CRF, self).__init__(**kwargs)
self.output_dim = int(output_dim)
self.mode = mode
if self.mode == 'pad':
self.input_spec = [tf.keras.layers.InputSpec(min_ndim=3), tf.keras.layers.InputSpec(min_ndim=2)]
elif self.mode == 'reg':
self.input_spec = tf.keras.layers.InputSpec(min_ndim=3)
else:
raise ValueError
self.supports_masking = supports_masking
self.sequence_lengths = None
def get_config(self):
config = {
'output_dim': self.output_dim,
'mode': self.mode,
'supports_masking': self.supports_masking,
'transitions': tf.keras.backend.eval(self.transitions)
}
base_config = super(CRF, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
if self.mode == 'pad':
assert len(input_shape) == 2
assert len(input_shape[0]) == 3
assert len(input_shape[1]) == 2
f_shape = tf.TensorShape(input_shape[0])
input_spec = [tf.keras.layers.InputSpec(min_ndim=3, axes={-1: f_shape[-1]}),
tf.keras.layers.InputSpec(min_ndim=2, axes={-1: 1}, dtype=tf.int32)]
else:
assert len(input_shape) == 3
f_shape = tf.TensorShape(input_shape)
input_spec = tf.keras.layers.InputSpec(min_ndim=3, axes={-1: f_shape[-1]})
if f_shape[-1] is None:
raise ValueError('The last dimension of the inputs to `CRF` should be defined. Found `None`.')
if f_shape[-1] != self.output_dim:
raise ValueError('The last dimension of the input shape must be equal to output shape. '
'Use a linear layer if needed.')
self.input_spec = input_spec
self.transitions = self.add_weight(name='transitions',
shape=[self.output_dim, self.output_dim],
initializer='glorot_uniform',
trainable=True)
self.built = True
def call(self, inputs, **kwargs):
if self.mode == 'pad':
sequences = tf.convert_to_tensor(inputs[0], dtype=self.dtype)
self.sequence_lengths = tf.keras.backend.flatten(inputs[-1])
else:
sequences = tf.convert_to_tensor(inputs, dtype=self.dtype)
shape = tf.shape(inputs)
self.sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
viterbi_sequence, _ = tf.contrib.crf.crf_decode(sequences, self.transitions,
self.sequence_lengths)
output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
return tf.keras.backend.in_train_phase(sequences, output)
def loss(self, y_true, y_pred):
y_pred = tf.convert_to_tensor(y_pred, dtype=self.dtype)
log_likelihood, self.transitions = tf.contrib.crf.crf_log_likelihood(y_pred,
tf.cast(tf.keras.backend.argmax(y_true),
dtype=tf.int32),
self.sequence_lengths,
transition_params=self.transitions)
return tf.reduce_mean(-log_likelihood)
def compute_output_shape(self, input_shape):
if self.mode == 'pad':
data_shape = input_shape[0]
else:
data_shape = input_shape
tf.TensorShape(data_shape).assert_has_rank(3)
return data_shape[:2] + (self.output_dim,)
@property
def viterbi_accuracy(self):
def accuracy(y_true, y_pred):
shape = tf.shape(y_pred)
sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
viterbi_sequence, _ = tf.contrib.crf.crf_decode(y_pred, self.transitions, sequence_lengths)
output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
return tf.keras.metrics.categorical_accuracy(y_true, output)
accuracy.func_name = 'viterbi_accuracy'
return accuracy
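if __name__ == "__main__":
    # A minimal sketch of the intended wiring (TensorFlow 1.x only, since the
    # layer relies on tf.contrib.crf); all dimensions below are illustrative.
    num_classes = 5
    inputs = tf.keras.layers.Input(shape=(20, 64))
    scores = tf.keras.layers.Dense(num_classes)(inputs)  # last dim must equal output_dim
    crf = CRF(num_classes)
    outputs = crf(scores)
    model = tf.keras.Model(inputs, outputs)
    # The layer's own loss and viterbi_accuracy must be used, as noted in the docstring.
    model.compile(optimizer='adam', loss=crf.loss, metrics=[crf.viterbi_accuracy])
    model.summary()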
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/layers/crf.py | crf.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: non_masking_layer.py
# time: 2019-05-23 14:05
import pysoftNLP.kashgari as kashgari
from tensorflow.python.keras.layers import Layer
class NonMaskingLayer(Layer):
"""
Workaround for 1D convolution layers that cannot receive masked input,
see https://github.com/keras-team/keras/issues/4978. Thanks to https://github.com/jacoxu.
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(NonMaskingLayer, self).__init__(**kwargs)
def build(self, input_shape):
pass
def compute_mask(self, inputs, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
return x
kashgari.custom_objects['NonMaskingLayer'] = NonMaskingLayer
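def _non_masking_demo():
    # A minimal sketch of the intended use: drop the mask produced by an
    # Embedding layer (mask_zero=True) before a Conv1D, which cannot handle
    # masked input. All sizes below are illustrative.
    from tensorflow.python import keras
    inputs = keras.layers.Input(shape=(50,))
    embedded = keras.layers.Embedding(1000, 64, mask_zero=True)(inputs)
    unmasked = NonMaskingLayer()(embedded)
    features = keras.layers.Conv1D(32, 3)(unmasked)
    return keras.Model(inputs, features)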
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/layers/non_masking_layer.py | non_masking_layer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-23 14:05
import tensorflow as tf
from tensorflow.python import keras
from pysoftNLP.kashgari.layers.non_masking_layer import NonMaskingLayer
from pysoftNLP.kashgari.layers.att_wgt_avg_layer import AttentionWeightedAverageLayer
from pysoftNLP.kashgari.layers.att_wgt_avg_layer import AttentionWeightedAverage, AttWgtAvgLayer
from pysoftNLP.kashgari.layers.kmax_pool_layer import KMaxPoolingLayer, KMaxPoolLayer, KMaxPooling
L = keras.layers
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/layers/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: scoring_processor.py
# time: 11:10 上午
from typing import List, Optional
import numpy as np
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari import utils
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
def is_numeric(obj):
attrs = ['__add__', '__sub__', '__mul__', '__truediv__', '__pow__']
return all(hasattr(obj, attr) for attr in attrs)
class ScoringProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def __init__(self, output_dim=None, **kwargs):
super(ScoringProcessor, self).__init__(**kwargs)
self.output_dim = output_dim
def info(self):
info = super(ScoringProcessor, self).info()
info['task'] = kashgari.SCORING
return info
def _build_label_dict(self,
label_list: List[List[float]]):
"""
Build label2idx dict for sequence labeling task
Args:
label_list: corpus label list
"""
if self.output_dim is None:
label_sample = label_list[0]
if isinstance(label_sample, np.ndarray) and len(label_sample.shape) == 1:
self.output_dim = label_sample.shape[0]
elif is_numeric(label_sample):
self.output_dim = 1
elif isinstance(label_sample, list):
self.output_dim = len(label_sample)
else:
raise ValueError('Scoring Label Sample must be a float, float array or 1D numpy array')
# np_labels = np.array(label_list)
# if np_labels.max() > 1 or np_labels.min() < 0:
# raise ValueError('Scoring Label Sample must be in range[0,1]')
def process_y_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data[:]
y = np.array(target)
return y
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
return sequences
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
return sequences
if __name__ == "__main__":
from pysoftNLP.kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
x = x[:3]
y = [0.2, 0.3, 0.2]
p = ScoringProcessor()
p.analyze_corpus(x, y)
print(p.process_y_dataset(y))
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/processors/scoring_processor.py | scoring_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_processor.py
# time: 2019-05-21 11:27
import collections
import logging
import operator
from typing import List, Optional, Union, Dict, Any
import numpy as np
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from pysoftNLP.kashgari import utils
class BaseProcessor(object):
"""
Corpus Pre Processor class
"""
def __init__(self, **kwargs):
self.token2idx: Dict[str, int] = kwargs.get('token2idx', {})
self.idx2token: Dict[int, str] = dict([(v, k) for (k, v) in self.token2idx.items()])
self.token2count: Dict = {}
self.label2idx: Dict[str, int] = kwargs.get('label2idx', {})
self.idx2label: Dict[int, str] = dict([(v, k) for (k, v) in self.label2idx.items()])
self.token_pad: str = kwargs.get('token_pad', '<PAD>')
self.token_unk: str = kwargs.get('token_unk', '<UNK>')
self.token_bos: str = kwargs.get('token_bos', '<BOS>')
self.token_eos: str = kwargs.get('token_eos', '<EOS>')
self.dataset_info: Dict[str, Any] = kwargs.get('dataset_info', {})
self.add_bos_eos: bool = kwargs.get('add_bos_eos', False)
self.sequence_length = kwargs.get('sequence_length', None)
self.min_count = kwargs.get('min_count', 3)
def info(self):
return {
'class_name': self.__class__.__name__,
'config': {
'label2idx': self.label2idx,
'token2idx': self.token2idx,
'token_pad': self.token_pad,
'token_unk': self.token_unk,
'token_bos': self.token_bos,
'token_eos': self.token_eos,
'dataset_info': self.dataset_info,
'add_bos_eos': self.add_bos_eos,
'sequence_length': self.sequence_length
},
'module': self.__class__.__module__,
}
def analyze_corpus(self,
corpus: Union[List[List[str]]],
labels: Union[List[List[str]], List[str]],
force: bool = False):
rec_len = sorted([len(seq) for seq in corpus])[int(0.95 * len(corpus))]
self.dataset_info['RECOMMEND_LEN'] = rec_len
if len(self.token2idx) == 0 or force:
self._build_token_dict(corpus, self.min_count)
if len(self.label2idx) == 0 or force:
self._build_label_dict(labels)
def _build_token_dict(self, corpus: List[List[str]], min_count: int = 3):
"""
Build token index dictionary using corpus
Args:
corpus: List of tokenized sentences, like ``[['I', 'love', 'tf'], ...]``
min_count:
"""
token2idx = {
self.token_pad: 0,
self.token_unk: 1,
self.token_bos: 2,
self.token_eos: 3
}
token2count = {}
for sentence in corpus:
for token in sentence:
count = token2count.get(token, 0)
token2count[token] = count + 1
self.token2count = token2count
# Sort tokens by frequency in descending order
sorted_token2count = sorted(token2count.items(),
key=operator.itemgetter(1),
reverse=True)
token2count = collections.OrderedDict(sorted_token2count)
for token, token_count in token2count.items():
if token not in token2idx and token_count >= min_count:
token2idx[token] = len(token2idx)
self.token2idx = token2idx
self.idx2token = dict([(value, key)
for key, value in self.token2idx.items()])
logging.debug(f"build token2idx dict finished, contains {len(self.token2idx)} tokens.")
self.dataset_info['token_count'] = len(self.token2idx)
def _build_label_dict(self, corpus: Union[List[List[str]], List[str]]):
raise NotImplementedError
def process_x_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if max_len is None:
max_len = self.sequence_length
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data
numerized_samples = self.numerize_token_sequences(target)
return pad_sequences(numerized_samples, max_len, padding='post', truncating='post')
def process_y_dataset(self,
data: Union[List[List[str]], List[str]],
max_len: Optional[int],
subset: Optional[List[int]] = None) -> np.ndarray:
raise NotImplementedError
def numerize_token_sequences(self,
sequences: List[List[str]]):
raise NotImplementedError
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
raise NotImplementedError
def reverse_numerize_label_sequences(self, sequence, **kwargs):
raise NotImplementedError
def __repr__(self):
return f"<{self.__class__}>"
def __str__(self):
return self.__repr__()
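def _token_dict_demo():
    # A minimal sketch of vocabulary building on a made-up toy corpus. With
    # min_count=1 every token gets an index; rarer tokens would otherwise fall
    # back to the <UNK> entry at prediction time.
    processor = BaseProcessor(min_count=1)
    processor._build_token_dict([['I', 'love', 'tf'], ['I', 'love', 'keras']],
                                min_count=1)
    print(processor.token2idx)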
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/processors/base_processor.py | base_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# version: 1.0
# license: Apache Licence
# file: corpus.py
# time: 2019-05-17 11:28
import collections
import logging
import operator
from typing import List, Dict, Optional
import numpy as np
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.utils import to_categorical
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari import utils
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
class LabelingProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def info(self):
info = super(LabelingProcessor, self).info()
info['task'] = kashgari.LABELING
return info
def _build_label_dict(self,
label_list: List[List[str]]):
"""
Build label2idx dict for sequence labeling task
Args:
label_list: corpus label list
"""
label2idx: Dict[str, int] = {
self.token_pad: 0
}
token2count = {}
for sequence in label_list:
for label in sequence:
count = token2count.get(label, 0)
token2count[label] = count + 1
sorted_token2count = sorted(token2count.items(),
key=operator.itemgetter(1),
reverse=True)
token2count = collections.OrderedDict(sorted_token2count)
for token in token2count.keys():
if token not in label2idx:
label2idx[token] = len(label2idx)
self.label2idx = label2idx
self.idx2label = dict([(value, key)
for key, value in self.label2idx.items()])
logging.debug(f"build label2idx dict finished, contains {len(self.label2idx)} labels.")
def process_y_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data[:]
numerized_samples = self.numerize_label_sequences(target)
padded_seq = pad_sequences(
numerized_samples, max_len, padding='post', truncating='post')
return to_categorical(padded_seq, len(self.label2idx))
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_pad] + seq + [self.token_pad]
result.append([self.label2idx[label] for label in seq])
return result
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
result = []
for index, seq in enumerate(sequences):
labels = []
if self.add_bos_eos:
seq = seq[1:]
for idx in seq:
labels.append(self.idx2label[idx])
if lengths is not None:
labels = labels[:lengths[index]]
result.append(labels)
return result
if __name__ == "__main__":
from pysoftNLP.kashgari.corpus import ChineseDailyNerCorpus
x, y = ChineseDailyNerCorpus.load_data()
p = LabelingProcessor()
p.analyze_corpus(x, y)
r = p.process_x_dataset(x, subset=[10, 12, 20])
print(r)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/processors/labeling_processor.py | labeling_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py.py
# time: 2019-05-20 10:54
from pysoftNLP.kashgari.processors.classification_processor import ClassificationProcessor
from pysoftNLP.kashgari.processors.labeling_processor import LabelingProcessor
from pysoftNLP.kashgari.processors.scoring_processor import ScoringProcessor
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/processors/__init__.py | __init__.py |
from typing import List, Optional
import numpy as np
from tensorflow.python.keras.utils import to_categorical
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari import utils
from pysoftNLP.kashgari.processors.base_processor import BaseProcessor
from sklearn.preprocessing import MultiLabelBinarizer
class ClassificationProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def __init__(self, multi_label=False, **kwargs):
super(ClassificationProcessor, self).__init__(**kwargs)
self.multi_label = multi_label
if self.label2idx:
self.multi_label_binarizer: MultiLabelBinarizer = MultiLabelBinarizer(classes=list(self.label2idx.keys()))
self.multi_label_binarizer.fit([])
else:
self.multi_label_binarizer: MultiLabelBinarizer = None
def info(self):
info = super(ClassificationProcessor, self).info()
info['task'] = kashgari.CLASSIFICATION
info['config']['multi_label'] = self.multi_label
return info
def _build_label_dict(self,
labels: List[str]):
if self.multi_label:
label_set = set()
for i in labels:
label_set = label_set.union(list(i))
else:
label_set = set(labels)
self.label2idx = {}
for idx, label in enumerate(sorted(label_set)):
self.label2idx[label] = len(self.label2idx)
self.idx2label = dict([(value, key) for key, value in self.label2idx.items()])
self.dataset_info['label_count'] = len(self.label2idx)
self.multi_label_binarizer = MultiLabelBinarizer(classes=list(self.label2idx.keys()))
def process_y_dataset(self,
data: List[str],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data
if self.multi_label:
return self.multi_label_binarizer.fit_transform(target)
else:
numerized_samples = self.numerize_label_sequences(target)
return to_categorical(numerized_samples, len(self.label2idx))
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[str]) -> List[int]:
"""
Convert a list of classification labels to a list of label indices,
e.g. ``['news', 'chat', 'news'] -> [3, 0, 3]`` (actual indices depend on the built ``label2idx`` dict)
Args:
sequences: label sequence, list of str
Returns:
label-index sequence, list of int
"""
return [self.label2idx[label] for label in sequences]
def reverse_numerize_label_sequences(self, sequences, **kwargs):
if self.multi_label:
return self.multi_label_binarizer.inverse_transform(sequences)
else:
return [self.idx2label[label] for label in sequences]
if __name__ == "__main__":
from pysoftNLP.kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
p = ClassificationProcessor()
p.analyze_corpus(x, y)
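    # A small additional sketch: after the corpus is analyzed, each label becomes
    # a one-hot row (the row width depends on how many labels the corpus has).
    print(p.process_y_dataset(y[:2]).shape)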
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/kashgari/processors/classification_processor.py | classification_processor.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/10-17:13
# @Author : 贾志凯
# @File : train.py
# @Software: win10 python3.6 PyCharm
from pysoftNLP.kashgari.corpus import ChineseDailyNerCorpus
from pysoftNLP.kashgari.tasks.labeling import BiLSTM_CRF_Model,BiGRU_CRF_Model,BiGRU_Model,BiLSTM_Model,CNN_LSTM_Model
import pysoftNLP.kashgari as kashgari
from pysoftNLP.kashgari.embeddings import BERTEmbedding
import os
def train(args,output_path):
# Load the corpus
train_x, train_y = ChineseDailyNerCorpus.load_data('train')
valid_x, valid_y = ChineseDailyNerCorpus.load_data('validate')
test_x, test_y = ChineseDailyNerCorpus.load_data('test')
print(f"Training set size: {len(train_x)}")
print(f"Validation set size: {len(valid_x)}")
print(f"Test set size: {len(test_x)}")
print(test_x[:1])
# Training
bert_embed = BERTEmbedding('D:\pysoftNLP_resources\pre_training_file\chinese_L-12_H-768_A-12',
task=kashgari.LABELING,
sequence_length=args['sentence_length'])
model = BiLSTM_CRF_Model(bert_embed)
model.fit(train_x,
train_y,
x_validate=valid_x,
y_validate=valid_y,
epochs=args['epochs'],
batch_size=args['batch_size'])
basis = 'D:\pysoftNLP_resources\entity_recognition'
model_path = os.path.join(basis, output_path)
model.save(model_path)
# Evaluation
print(model.evaluate(test_x, test_y))
# Prediction
# loaded_model = kashgari.utils.load_model('saved_ner_model')
if __name__ == '__main__':
args = {'sentence_length': 100, 'batch_size': 512, 'epochs': 20}
output_path = 'ner_company'
train(args,output_path)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/train.py | train.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/10-17:42
# @Author : 贾志凯
# @File : pre.py
# @Software: win10 python3.6 PyCharm
import pysoftNLP.kashgari as kashgari
import re
import time
import os
import pandas as pd
def load_model(model_name = 'ner'):
basis = 'D:\pysoftNLP_resources\entity_recognition'
model_path = os.path.join(basis, model_name)
load_start = time.time()
loaded_model = kashgari.utils.load_model(model_path)
load_end = time.time()
print("模型加载时间:",load_end-load_start)
return loaded_model
#
def cut_text(text, lenth):
textArr = re.findall('.{' + str(lenth) + '}', text)
textArr.append(text[(len(textArr) * lenth):])
return textArr
def extract_labels(text, ners):
ner_reg_list = []
if ners:
new_ners = []
for ner in ners:
new_ners += ner
for word, tag in zip([char for char in text], new_ners):
if tag != 'O':
ner_reg_list.append((word, tag))
# Extract the NER results recognised by the model
labels = {}
if ner_reg_list:
for i, item in enumerate(ner_reg_list):
if item[1].startswith('B'):
label = ""
end = i + 1
while end <= len(ner_reg_list) - 1 and ner_reg_list[end][1].startswith('I'):
end += 1
ner_type = item[1].split('-')[1]
if ner_type not in labels.keys():
labels[ner_type] = []
label += ''.join([item[0] for item in ner_reg_list[i:end]])
labels[ner_type].append(label)
return labels
# Split the text into segments
def text_pattern(text):
patterns = ['集团|公司', '。|,|?|!|、|;|;|:']  # [entity keywords, sentence delimiters]
text = '。。' + text
text = text[::-1]
temp = []
def dfs(text, temp):
if not text:
return temp
pattern_text = re.compile(patterns[0][::-1]).findall(text)
if pattern_text:
text = pattern_text[0] + text.split(pattern_text[0], 1)[1]
comma = re.compile(patterns[1]).findall(text)[0]
res_text = text.split(comma, 1)[0]
temp.append(res_text[::-1])
text = text.split(comma, 1)[1]
else:
# res.append(temp[:]) <class 'list'>: ['中广核新能源湖南分公司']
return temp
dfs(text,temp)
dfs(text,temp)
return temp
def final_test(path,model_name):
import pandas as pd
data = pd.read_table(path, header=None, encoding='utf-8', sep='\t')
data = data[:200]
data.columns = ['标题', '内容']
data['nr'] = data['标题'] + data['内容']
data['te'] = ''
for i in range(len(data)):
first_text = data['nr'][i].replace(" ", "")
print("原始文本:",first_text)
last_text = text_pattern(first_text)
if not last_text:
continue
last = []
for text_input in last_text:
texts = cut_text(text_input, 100)
pre_start = time.time()
ners = load_model(model_name).predict([[char for char in text] for text in texts])
pre_end = time.time()
print("切割文章的预测时间:",pre_end - pre_start)
print("切割的文章内容:",text_input)
print("切割文本的BIO结果:",ners)
labels = extract_labels(text_input, ners)
res = []
if labels.__contains__('ORG') and labels.__contains__('LOC'):
entity = labels['ORG'] + labels['LOC']
elif labels.__contains__('ORG'):
entity = labels['ORG']
elif labels.__contains__('LOC'):
entity = labels['LOC']
else:
entity = []
for j in entity:
punc = '~`!#$%^&*()_+-=|\';":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'
j = re.sub(r"[%s]+" % punc, "", j)
if re.fullmatch('集团|公司|子公司|本公司|家公司|分公司|上市公司', j):# j == '公司' or j =='集团' or j == '子公司' or j =='本公司' or j == '家公司' or j =='分公司' or j =='上市公司' or j =='母公司': #re.compile('子公司|本公司|家公司|分公司|上市公司').findall(str(j)) or
break
if re.fullmatch('丰田|华为|苹果|微软|爱立信|阿里|三星|中国联通|中国移动|腾讯|联想|台机电|小米|亚马逊|甲骨文|高通|软银|特斯拉|百度|中石化|中石油', j):#j =='华为' or j =='苹果' or j =='微软' or j=='爱立信' or j=='阿里' or j =='三星' or j =='中国联通' or j =='中国移动' or j =='腾讯' or j =='联想':
res.append(j)
elif re.compile('集团|公司|科技|煤炭|医药|工厂|国际|银行|钢铁|机械').findall(str(j[-2:])): #'集团|有限公司|公司|科技|医药|苹果|华为|谷歌|河南863|富士康'
res.append(j)
res = list(set(res))
print("各个类型的实体结果:", entity)
print("集团公司:", res)
if res:
last.append('|'.join(res))
last = list(set(last))
data['te'][i] = '|'.join(last)
print('最后的公司结果:',"|".join(last))
pd.DataFrame(data).to_csv('result/a.csv', index=False)
# Single-sentence prediction
def single_sentence(sentence,model_name):
first_text = sentence.replace(" ", "")
print("原始文本:", first_text)
last_text = text_pattern(first_text)
if last_text:
last = []
for text_input in last_text:
texts = cut_text(text_input, 100)
pre_start = time.time()
ners = load_model(model_name).predict([[char for char in text] for text in texts])
pre_end = time.time()
print("切割文章的预测时间:", pre_end - pre_start)
print("切割的文章内容:", text_input)
print("切割文本的BIO结果:", ners)
labels = extract_labels(text_input, ners)
res = []
if labels.__contains__('ORG') and labels.__contains__('LOC'):
entity = labels['ORG'] + labels['LOC']
elif labels.__contains__('ORG'):
entity = labels['ORG']
elif labels.__contains__('LOC'):
entity = labels['LOC']
else:
entity = []
for j in entity:
punc = '~`!#$%^&*()_+-=|\';":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'
j = re.sub(r"[%s]+" % punc, "", j)
if re.fullmatch('集团|公司|子公司|本公司|家公司|分公司|上市公司',
j): # j == '公司' or j =='集团' or j == '子公司' or j =='本公司' or j == '家公司' or j =='分公司' or j =='上市公司' or j =='母公司': #re.compile('子公司|本公司|家公司|分公司|上市公司').findall(str(j)) or
break
if re.fullmatch('丰田|华为|苹果|微软|爱立信|阿里|三星|中国联通|中国移动|腾讯|联想|台机电|小米|亚马逊|甲骨文|高通|软银|特斯拉|百度|中石化|中石油',
j): # j =='华为' or j =='苹果' or j =='微软' or j=='爱立信' or j=='阿里' or j =='三星' or j =='中国联通' or j =='中国移动' or j =='腾讯' or j =='联想':
res.append(j)
elif re.compile('集团|公司|科技|煤炭|医药|工厂|国际|银行|钢铁|机械').findall(
str(j[-2:])): # '集团|有限公司|公司|科技|医药|苹果|华为|谷歌|河南863|富士康'
res.append(j)
res = list(set(res))
print("各个类型的实体结果:", entity)
print("集团公司:", res)
if res:
last.append('|'.join(res))
last = list(set(last))
result = "|".join(last)
print('最后的公司结果:', result)
return result
# Batch prediction over a list of sentences
def multi_sentence(sentencelist,out_path,model_name):
df_data_output = pd.DataFrame()
df_data_output['text'] = sentencelist
df_data_output['ner'] = ''
for i in range(len(sentencelist)):
first_text = sentencelist[i].replace(" ", "")
last_text = text_pattern(first_text)
if not last_text:
continue
last = []
for text_input in last_text:
texts = cut_text(text_input, 100)
ners = load_model(model_name).predict([[char for char in text] for text in texts])
labels = extract_labels(text_input, ners)
res = []
if labels.__contains__('ORG') and labels.__contains__('LOC'):
entity = labels['ORG'] + labels['LOC']
elif labels.__contains__('ORG'):
entity = labels['ORG']
elif labels.__contains__('LOC'):
entity = labels['LOC']
else:
entity = []
for j in entity:
punc = '~`!#$%^&*()_+-=|\';":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'
j = re.sub(r"[%s]+" % punc, "", j)
if re.fullmatch('集团|公司|子公司|本公司|家公司|分公司|上市公司',
j): # j == '公司' or j =='集团' or j == '子公司' or j =='本公司' or j == '家公司' or j =='分公司' or j =='上市公司' or j =='母公司': #re.compile('子公司|本公司|家公司|分公司|上市公司').findall(str(j)) or
break
if re.fullmatch('丰田|华为|苹果|微软|爱立信|阿里|三星|中国联通|中国移动|腾讯|联想|台机电|小米|亚马逊|甲骨文|高通|软银|特斯拉|百度|中石化|中石油',
j): # j =='华为' or j =='苹果' or j =='微软' or j=='爱立信' or j=='阿里' or j =='三星' or j =='中国联通' or j =='中国移动' or j =='腾讯' or j =='联想':
res.append(j)
elif re.compile('集团|公司|科技|煤炭|医药|工厂|国际|银行|钢铁|机械').findall(
str(j[-2:])): # '集团|有限公司|公司|科技|医药|苹果|华为|谷歌|河南863|富士康'
res.append(j)
res = list(set(res))
# print("各个类型的实体结果:", entity)
# print("集团公司:", res)
if res:
last.append('|'.join(res))
last = list(set(last))
df_data_output['ner'][i] = '|'.join(last)
out_path = os.path.join(out_path, r'result.csv')
# print('最后的公司结果:', "|".join(last))
pd.DataFrame(df_data_output).to_csv(out_path, index=False)
path = 'C:/Users/Administrator/Desktop/a.txt'
def predict(path, model_name='ner'):
    # 'ner' matches the default model folder name used by load_model above.
    final_test(path, model_name)
# text_input = input('句子: ').stride()x.drop('',axis = 1)
'''import re
# text_input = 'BAT:B指百度,A指阿里巴巴,T指腾讯,是中国互联网du公司百度zhi公司(Baidu),阿里巴巴集团(Alibaba),腾讯公司(Tencent)三大互联网公司首字母的缩写。BAT已经成为中国最大的三家互联网公司'
# text_input ='“新冠疫情或让印度的大国梦碎。”印度《经济时报》5日以此为题报道称,疫情正在对印度经济造成严重影响。据印度卫生部门统计,截至当地时间6日早,印度过去一天新增新冠肺炎确诊病例90633例,再创历史新高,累计确诊已超411万例,累计死亡70626例。印度即将超过巴西成为仅次于美国的全球确诊病例第二高的国家。'
# text_input = '工程机械持续火热:国内挖掘机销量连5个月同比增速超50% 当前,工程机械行业持续火热。业内人士认为,国内复工复产和基建、房地产共同带来的需求,是拉动工程机械销量在二季度实现大反弹的主要因素。预计四季度工程机械行业仍将维持高景气,增长势头有望延续。 据最新数据,2020年8月纳入统计的25家挖掘机制造企业共销售各类挖掘机20939台,同比增长51.3%。国内挖掘机销量连续5个月同比增速保持50%以上。今年前8个月销售总量已占到2019年全年销量的89.3%。'
input = ['中广核新能源湖南分公司','该公司','中广核新能源公司']
last = []
for text_input in input:
texts = cut_text(text_input, 100)
pre_start= time.time()
ners = loaded_model.predict([[char for char in text] for text in texts])
pre_end = time.time()
print("预测时间:",pre_end - pre_start)
print("文章内容:",text_input)
print("BIO结果:",ners)
labels = extract_labels(text_input, ners)
res = []
if labels.__contains__('ORG') and labels.__contains__('LOC'):
entity = labels['ORG'] + labels['LOC']
elif labels.__contains__('ORG'):
entity = labels['ORG']
elif labels.__contains__('LOC'):
entity = labels['LOC']
else:
entity = []
for j in entity:
punc = '~`!#$%^&*()_+-=|\';":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'
j = re.sub(r"[%s]+" % punc, "", j)
if re.fullmatch('集团|公司|子公司|本公司|家公司|分公司|上市公司', j):# j == '公司' or j =='集团' or j == '子公司' or j =='本公司' or j == '家公司' or j =='分公司' or j =='上市公司' or j =='母公司': #re.compile('子公司|本公司|家公司|分公司|上市公司').findall(str(j)) or
break
if re.fullmatch('丰田|华为|苹果|微软|爱立信|阿里|三星|中国联通|中国移动|腾讯|联想|台机电|小米|亚马逊|甲骨文|高通|软银|特斯拉|百度|中石化|中石油', j):#j =='华为' or j =='苹果' or j =='微软' or j=='爱立信' or j=='阿里' or j =='三星' or j =='中国联通' or j =='中国移动' or j =='腾讯' or j =='联想':
res.append(j)
# break
elif re.compile('集团|公司|科技|煤炭|医药|工厂|国际|银行|钢铁|机械').findall(str(j[-2:])): #'集团|有限公司|公司|科技|医药|苹果|华为|谷歌|河南863|富士康'
res.append(j)
res = list(set(res))
# data['te'][i] = '|'.join(res)
# print("各个类型的实体结果:", labels['ORG'])
# print(labels,type(labels))
print("各个类型的实体结果:", entity)
print("集团公司:", res)
if res:
last.append(''.join(res))
print(last)
print('最后的公司结果:',"|".join(last))'''
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/pre.py | pre.py |
# -*- coding: utf-8 -*-
# @Time : 2020/9/21-16:06
# @Author : 贾志凯
# @File : read_data.py
# @Software: win10 python3.6 PyCharm
# import pandas as pd
# data = pd.read_table('C:/Users/Administrator/Desktop/a.txt', header=None, encoding='utf-8', sep='\t')
# data.columns = ['标题', '内容']
# data['nr'] = data['标题'] + data['内容']
# data['te'] = ''
# print(data['nr'][0])
#
import re
text = '根据伊泰集团2020年9月13日18点更新的销售价格显示,伊泰集团较上周五调整部分煤矿煤炭坑口价格-30~20元/吨,具体如下: 煤矿名称 产品名称 价格(元/吨) 较9月11日调整(元/吨)'
print(re.compile('集团|公司').findall(text))
# text = '丰田'
# pattern = '丰田|丰田曾宣布与日本宇宙航空研究机构(JAXA)联手开发未来能够在月球上运动的燃料电池六轮月球车'
# m = re.search(pattern, text)
# print('Search :', m)
# s = re.fullmatch(pattern, text)
# print('Full match :', s)
# if s:
# print(text) | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/read_data.py | read_data.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
__all__ = ['pre','train'] | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: migration.py
# time: 2:31 下午
import subprocess
import logging
guide = """
╭─────────────────────────────────────────────────────────────────────────╮
│ ◎ ○ ○ ░░░░░░░░░░░░░░░░░░░░░ Important Message ░░░░░░░░░░░░░░░░░░░░░░░░│
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ We renamed again for consistency and clarity. │
│ From now on, it is all `kashgari`. │
│ Changelog: https://github.com/BrikerMan/Kashgari/releases/tag/v1.0.0 │
│ │
│ | Backend | pypi version | desc | │
│ | ---------------- | -------------- | -------------- | │
│ | TensorFlow 2.x | kashgari 2.x.x | coming soon | │
│ | TensorFlow 1.14+ | kashgari 1.x.x | | │
│ | Keras | kashgari 0.x.x | legacy version | │
│ │
╰─────────────────────────────────────────────────────────────────────────╯
"""
def show_migration_guide():
requirements = subprocess.getoutput("pip freeze")
for package in requirements.splitlines():
if '==' in package:
package_name, package_version = package.split('==')
if package_name == 'kashgari-tf':
logging.warning(guide)
if __name__ == "__main__":
show_migration_guide()
print("hello, world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/migeration.py | migeration.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: macros.py
@time: 2019-05-17 11:38
"""
import os
import logging
from pathlib import Path
import tensorflow as tf
DATA_PATH = os.path.join(str(Path.home()), '.kashgari')
Path(DATA_PATH).mkdir(exist_ok=True, parents=True)
class TaskType(object):
CLASSIFICATION = 'classification'
LABELING = 'labeling'
SCORING = 'scoring'
class Config(object):
def __init__(self):
self._use_cudnn_cell = False
self.disable_auto_summary = False
if tf.test.is_gpu_available(cuda_only=True):
logging.warning("CUDA GPU available, you can set `kashgari.config.use_cudnn_cell = True` to use CuDNNCell. "
"This will speed up the training, "
"but will make model incompatible with CPU device.")
@property
def use_cudnn_cell(self):
return self._use_cudnn_cell
@use_cudnn_cell.setter
def use_cudnn_cell(self, value):
self._use_cudnn_cell = value
from kashgari.layers import L
if value:
if tf.test.is_gpu_available(cuda_only=True):
L.LSTM = tf.compat.v1.keras.layers.CuDNNLSTM
L.GRU = tf.compat.v1.keras.layers.CuDNNGRU
logging.warning("CuDNN enabled, this will speed up the training, "
"but will make model incompatible with CPU device.")
else:
logging.warning("Unable to use CuDNN cell, no GPU available.")
else:
L.LSTM = tf.keras.layers.LSTM
L.GRU = tf.keras.layers.GRU
def to_dict(self):
return {
'use_cudnn_cell': self.use_cudnn_cell
}
config = Config()
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/macros.py | macros.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __version__.py.py
# time: 2019-05-20 16:32
__version__ = '1.1.1'
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/__version__.py | __version__.py |
# encoding: utf-8
import os
import logging
import pandas as pd
from kashgari import macros as k
from typing import Tuple, List
from tensorflow.python.keras.utils import get_file
from kashgari import utils
CORPUS_PATH = os.path.join(k.DATA_PATH, 'corpus')
class DataReader(object):
@staticmethod
def read_conll_format_file(file_path: str,
text_index: int = 0,
label_index: int = 1) -> Tuple[List[List[str]], List[List[str]]]:
"""
Read conll format data_file
Args:
file_path: path of target file
text_index: index of text data, default 0
label_index: index of label data, default 1
Returns:
"""
x_data, y_data = [], []
with open(file_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
x, y = [], []
for line in lines:
rows = line.split(' ')
if len(rows) == 1:
x_data.append(x)
y_data.append(y)
x = []
y = []
else:
x.append(rows[text_index])
y.append(rows[label_index])
return x_data, y_data
class ChineseDailyNerCorpus(object):
"""
Chinese People's Daily NER Corpus
https://github.com/zjy-ucas/ChineseNER/
"""
__corpus_name__ = 'china-people-daily-ner-corpus'
__zip_file__name = 'http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
shuffle: bool = True) -> Tuple[List[List[str]], List[List[str]]]:
"""
Load dataset as sequence labeling format, char level tokenized
features: ``[['海', '钓', '比', '赛', '地', '点', '在', '厦', '门', ...], ...]``
labels: ``[['O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', ...], ...]``
Sample::
train_x, train_y = ChineseDailyNerCorpus.load_data('train')
test_x, test_y = ChineseDailyNerCorpus.load_data('test')
Args:
subset_name: {train, test, valid}
shuffle: should shuffle or not, default True.
Returns:
dataset_features and dataset labels
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
# NOTE: overrides the downloaded cache path with a local copy of the corpus
# in the current working directory.
corpus_path = './china-people-daily-ner-corpus'
logging.debug(corpus_path)
if subset_name == 'train':
file_path = os.path.join(corpus_path, 'example.train')
elif subset_name == 'test':
file_path = os.path.join(corpus_path, 'example.test')
else:
file_path = os.path.join(corpus_path, 'example.dev')
x_data, y_data = DataReader.read_conll_format_file(file_path)
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {file_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
class CONLL2003ENCorpus(object):
__corpus_name__ = 'conll2003_en'
__zip_file__name = 'http://s3.bmio.net/kashgari/conll2003_en.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
task_name: str = 'ner',
shuffle: bool = True) -> Tuple[List[List[str]], List[List[str]]]:
"""
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
if subset_name not in {'train', 'test', 'valid'}:
raise ValueError()
file_path = os.path.join(corpus_path, f'{subset_name}.txt')
if task_name not in {'pos', 'chunking', 'ner'}:
raise ValueError()
data_index = ['pos', 'chunking', 'ner'].index(task_name) + 1
x_data, y_data = DataReader.read_conll_format_file(file_path, label_index=data_index)
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {file_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
class SMP2018ECDTCorpus(object):
"""
https://worksheets.codalab.org/worksheets/0x27203f932f8341b79841d50ce0fd684f/
This dataset is released by the Evaluation of Chinese Human-Computer Dialogue Technology (SMP2018-ECDT)
task 1 and is provided by the iFLYTEK Corporation, which is a Chinese human-computer dialogue dataset.
sample::
label query
0 weather 今天东莞天气如何
1 map 从观音桥到重庆市图书馆怎么走
2 cookbook 鸭蛋怎么腌?
3 health 怎么治疗牛皮癣
4 chat 唠什么
"""
__corpus_name__ = 'SMP2018ECDTCorpus'
__zip_file__name = 'http://s3.bmio.net/kashgari/SMP2018ECDTCorpus.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
shuffle: bool = True,
cutter: str = 'char') -> Tuple[List[List[str]], List[str]]:
"""
Load dataset as sequence classification format, char level tokenized
features: ``[['听', '新', '闻', '。'], ['电', '视', '台', '在', '播', '什', '么'], ...]``
labels: ``['news', 'epg', ...]``
Samples::
train_x, train_y = SMP2018ECDTCorpus.load_data('train')
test_x, test_y = SMP2018ECDTCorpus.load_data('test')
Args:
subset_name: {train, test, valid}
shuffle: should shuffle or not, default True.
cutter: sentence cutter, {char, jieba}
Returns:
dataset_features and dataset labels
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
if cutter not in ['char', 'jieba', 'none']:
raise ValueError('cutter error, please use one of {char, jieba}')
df_path = os.path.join(corpus_path, f'{subset_name}.csv')
df = pd.read_csv(df_path)
if cutter == 'jieba':
try:
import jieba
except ModuleNotFoundError:
raise ModuleNotFoundError(
"please install jieba, `$ pip install jieba`")
x_data = [list(jieba.cut(item)) for item in df['query'].to_list()]
elif cutter == 'char':
x_data = [list(item) for item in df['query'].to_list()]
y_data = df['label'].to_list()
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {df_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
if __name__ == "__main__":
a, b = CONLL2003ENCorpus.load_data()
print(a[:2])
print(b[:2])
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/corpus.py | corpus.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: callbacks.py
# time: 2019-05-22 15:00
from sklearn import metrics
from kashgari import macros
from tensorflow.python import keras
from kashgari.tasks.base_model import BaseModel
from seqeval import metrics as seq_metrics
class EvalCallBack(keras.callbacks.Callback):
def __init__(self, kash_model: BaseModel, valid_x, valid_y,
step=5, batch_size=256, average='weighted'):
"""
Evaluate callback, calculate precision, recall and f1
Args:
kash_model: the kashgari model to evaluate
valid_x: feature data
valid_y: label data
step: step, default 5
batch_size: batch size, default 256
"""
super(EvalCallBack, self).__init__()
self.kash_model = kash_model
self.valid_x = valid_x
self.valid_y = valid_y
self.step = step
self.batch_size = batch_size
self.average = average
self.logs = []
def on_epoch_end(self, epoch, logs=None):
if (epoch + 1) % self.step == 0:
y_pred = self.kash_model.predict(self.valid_x, batch_size=self.batch_size)
if self.kash_model.task == macros.TaskType.LABELING:
y_true = [seq[:len(y_pred[index])] for index, seq in enumerate(self.valid_y)]
precision = seq_metrics.precision_score(y_true, y_pred)
recall = seq_metrics.recall_score(y_true, y_pred)
f1 = seq_metrics.f1_score(y_true, y_pred)
else:
y_true = self.valid_y
precision = metrics.precision_score(y_true, y_pred, average=self.average)
recall = metrics.recall_score(y_true, y_pred, average=self.average)
f1 = metrics.f1_score(y_true, y_pred, average=self.average)
self.logs.append({
'precision': precision,
'recall': recall,
'f1': f1
})
print(f"\nepoch: {epoch} precision: {precision:.6f}, recall: {recall:.6f}, f1: {f1:.6f}")
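def _eval_callback_demo(kash_model, train_x, train_y, valid_x, valid_y):
    # A minimal sketch of the intended wiring; the model and data are supplied by
    # the caller, nothing here assumes a particular corpus. It assumes the
    # kashgari model's `fit` accepts a `callbacks` list, as the standard models
    # in this package do.
    eval_callback = EvalCallBack(kash_model=kash_model,
                                 valid_x=valid_x,
                                 valid_y=valid_y,
                                 step=5,
                                 batch_size=256)
    kash_model.fit(train_x, train_y,
                   x_validate=valid_x, y_validate=valid_y,
                   epochs=20,
                   callbacks=[eval_callback])
    return eval_callback.logs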
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/callbacks.py | callbacks.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: helpers.py
@time: 2019-05-17 11:37
"""
import json
import os
import pathlib
import pydoc
import random
import time
from typing import List, Optional, Dict, Union
import tensorflow as tf
from tensorflow.python import keras, saved_model
from kashgari import custom_objects
from kashgari.embeddings.base_embedding import Embedding
from kashgari.layers.crf import CRF
from kashgari.processors.base_processor import BaseProcessor
from kashgari.tasks.base_model import BaseModel
from kashgari.tasks.classification.base_model import BaseClassificationModel
from kashgari.tasks.labeling.base_model import BaseLabelingModel
def unison_shuffled_copies(a, b):
assert len(a) == len(b)
c = list(zip(a, b))
random.shuffle(c)
a, b = zip(*c)
return list(a), list(b)
def get_list_subset(target: List, index_list: List[int]) -> List:
return [target[i] for i in index_list if i < len(target)]
def custom_object_scope():
return tf.keras.utils.custom_object_scope(custom_objects)
def load_model(model_path: str, load_weights: bool = True) -> Union[BaseClassificationModel, BaseLabelingModel]:
"""
Load saved model from saved model from `model.save` function
Args:
model_path: model folder path
load_weights: if False, only the model structure and vocabulary are loaded; default True.
Returns:
"""
with open(os.path.join(model_path, 'model_info.json'), 'r') as f:
model_info = json.load(f)
model_class = pydoc.locate(f"{model_info['module']}.{model_info['class_name']}")
model_json_str = json.dumps(model_info['tf_model'])
model = model_class()
model.tf_model = tf.keras.models.model_from_json(model_json_str, custom_objects)
if load_weights:
model.tf_model.load_weights(os.path.join(model_path, 'model_weights.h5'))
embed_info = model_info['embedding']
embed_class = pydoc.locate(f"{embed_info['module']}.{embed_info['class_name']}")
embedding: Embedding = embed_class._load_saved_instance(embed_info,
model_path,
model.tf_model)
model.embedding = embedding
if type(model.tf_model.layers[-1]) == CRF:
model.layer_crf = model.tf_model.layers[-1]
return model
def load_processor(model_path: str) -> BaseProcessor:
"""
Load processor from model
When using tf-serving, we need the model's processor to pre-process the data
Args:
model_path:
Returns:
"""
with open(os.path.join(model_path, 'model_info.json'), 'r') as f:
model_info = json.load(f)
processor_info = model_info['embedding']['processor']
processor_class = pydoc.locate(f"{processor_info['module']}.{processor_info['class_name']}")
processor: BaseProcessor = processor_class(**processor_info['config'])
return processor
def convert_to_saved_model(model: BaseModel,
model_path: str,
version: str = None,
inputs: Optional[Dict] = None,
outputs: Optional[Dict] = None):
"""
Export model for tensorflow serving
Args:
model: Target model
model_path: The path to which the SavedModel will be stored.
version: The model version code, default timestamp
inputs: dict mapping string input names to tensors. These are added
to the SignatureDef as the inputs.
outputs: dict mapping string output names to tensors. These are added
to the SignatureDef as the outputs.
"""
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
if version is None:
version = round(time.time())
export_path = os.path.join(model_path, str(version))
if inputs is None:
inputs = {i.name: i for i in model.tf_model.inputs}
if outputs is None:
outputs = {o.name: o for o in model.tf_model.outputs}
sess = keras.backend.get_session()
saved_model.simple_save(session=sess,
export_dir=export_path,
inputs=inputs,
outputs=outputs)
with open(os.path.join(export_path, 'model_info.json'), 'w') as f:
f.write(json.dumps(model.info(), indent=2, ensure_ascii=True))
f.close()
if __name__ == "__main__":
path = '/Users/brikerman/Desktop/python/Kashgari/tests/classification/saved_models/' \
'kashgari.tasks.classification.models/BiLSTM_Model'
p = load_processor(path)
print(p.process_x_dataset([list('语言模型')]))
print(p.label2idx)
print(p.token2idx)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/utils.py | utils.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: migration.py
# time: 2:31 下午
import subprocess
import logging
guide = """
╭─────────────────────────────────────────────────────────────────────────╮
│ ◎ ○ ○ ░░░░░░░░░░░░░░░░░░░░░ Important Message ░░░░░░░░░░░░░░░░░░░░░░░░│
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ We renamed again for consistency and clarity. │
│ From now on, it is all `kashgari`. │
│ Changelog: https://github.com/BrikerMan/Kashgari/releases/tag/v1.0.0 │
│ │
│ | Backend | pypi version | desc | │
│ | ---------------- | -------------- | -------------- | │
│ | TensorFlow 2.x | kashgari 2.x.x | coming soon | │
│ | TensorFlow 1.14+ | kashgari 1.x.x | | │
│ | Keras | kashgari 0.x.x | legacy version | │
│ │
╰─────────────────────────────────────────────────────────────────────────╯
"""
def show_migration_guide():
requirements = subprocess.getoutput("pip freeze")
for package in requirements.splitlines():
if '==' in package:
package_name, package_version = package.split('==')
if package_name == 'kashgari-tf':
logging.warning(guide)
if __name__ == "__main__":
show_migration_guide()
print("hello, world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/migration.py | migration.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __version__.py.py
# time: 2019-05-20 16:32
__version__ = '1.0.0'
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/version.py | version.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: __init__.py
@time: 2019-05-17 11:15
"""
import os
os.environ['TF_KERAS'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import keras_bert
from kashgari.macros import TaskType, config
custom_objects = keras_bert.get_custom_objects()
CLASSIFICATION = TaskType.CLASSIFICATION
LABELING = TaskType.LABELING
SCORING = TaskType.SCORING
from kashgari.__version__ import __version__
from kashgari import layers
from kashgari import corpus
from kashgari import embeddings
from kashgari import macros
from kashgari import processors
from kashgari import tasks
from kashgari import utils
from kashgari import callbacks
from kashgari import migration
migration.show_migration_guide()
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_tokenizer.py
# time: 11:24 上午
class Tokenizer:
"""Abstract base class for all implemented tokenizers.
"""
def tokenize(self, text: str):
"""
Tokenize text into token sequence
Args:
text: target text sample
Returns:
List of tokens in this sample
"""
return text.split(' ')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tokenizer/base_tokenizer.py | base_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bert_tokenizer.py
# time: 11:33 上午
# flake8: noqa: E127
import codecs
import os
import unicodedata
from kashgari.tokenizer.base_tokenizer import Tokenizer
TOKEN_PAD = '' # Token for padding
TOKEN_UNK = '[UNK]' # Token for unknown words
TOKEN_CLS = '[CLS]' # Token for classification
TOKEN_SEP = '[SEP]' # Token for separation
TOKEN_MASK = '[MASK]' # Token for masking
class BertTokenizer(Tokenizer):
"""
Bert Like Tokenizer, ref: https://github.com/CyberZHG/keras-bert/blob/master/keras_bert/tokenizer.py
"""
def __init__(self,
token_dict=None,
token_cls=TOKEN_CLS,
token_sep=TOKEN_SEP,
token_unk=TOKEN_UNK,
pad_index=0,
cased=False):
"""Initialize tokenizer.
:param token_dict: A dict maps tokens to indices.
:param token_cls: The token represents classification.
:param token_sep: The token represents separator.
:param token_unk: The token represents unknown token.
:param pad_index: The index to pad.
:param cased: Whether to keep the case.
"""
self._token_dict = token_dict
if self._token_dict:
self._token_dict_inv = {v: k for k, v in token_dict.items()}
else:
self._token_dict_inv = {}
self._token_cls = token_cls
self._token_sep = token_sep
self._token_unk = token_unk
self._pad_index = pad_index
self._cased = cased
@classmethod
def load_from_model(cls, model_path: str):
dict_path = os.path.join(model_path, 'vocab.txt')
token2idx = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
return BertTokenizer(token_dict=token2idx)
@classmethod
def load_from_vacab_file(cls, vacab_path: str):
token2idx = {}
with codecs.open(vacab_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
return BertTokenizer(token_dict=token2idx)
def tokenize(self, first):
"""Split text to tokens.
:param first: First text.
:return: A list of strings.
"""
tokens = self._tokenize(first)
return tokens
def _tokenize(self, text):
if not self._cased:
text = unicodedata.normalize('NFD', text)
text = ''.join([ch for ch in text if unicodedata.category(ch) != 'Mn'])
text = text.lower()
spaced = ''
for ch in text:
if self._is_punctuation(ch) or self._is_cjk_character(ch):
spaced += ' ' + ch + ' '
elif self._is_space(ch):
spaced += ' '
elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
continue
else:
spaced += ch
if self._token_dict:
tokens = []
for word in spaced.strip().split():
tokens += self._word_piece_tokenize(word)
return tokens
else:
return spaced.strip().split()
def _word_piece_tokenize(self, word):
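        # Greedy longest-match-first segmentation: repeatedly take the longest prefix
        # of the remaining characters that exists in the vocabulary, marking
        # non-initial pieces with the '##' prefix (standard WordPiece behaviour).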
if word in self._token_dict:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in self._token_dict:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
@staticmethod
def _is_punctuation(ch): # noqa: E127
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
@staticmethod
def _is_cjk_character(ch):
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
@staticmethod
def _is_space(ch):
return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or unicodedata.category(ch) == 'Zs'
@staticmethod
def _is_control(ch):
return unicodedata.category(ch) in ('Cc', 'Cf')
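# A rough usage sketch (the model folder name is illustrative; it only needs to
# contain a `vocab.txt`):
#   tokenizer = BertTokenizer.load_from_model('chinese_L-12_H-768_A-12')
#   tokenizer.tokenize('语言模型')  # CJK characters are split individually, e.g. ['语', '言', '模', '型']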
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tokenizer/bert_tokenizer.py | bert_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: jieba_tokenizer.py
# time: 11:54 上午
from kashgari.tokenizer.base_tokenizer import Tokenizer
class JiebaTokenizer(Tokenizer):
"""Jieba tokenizer
"""
def __init__(self):
try:
import jieba
self._jieba = jieba
except ModuleNotFoundError:
            raise ModuleNotFoundError("Jieba module not found, please install it with `pip install jieba`")
def tokenize(self, text: str, **kwargs):
"""
Tokenize text into token sequence
Args:
text: target text sample
Returns:
List of tokens in this sample
"""
return list(self._jieba.cut(text, **kwargs))
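# Example (actual segmentation depends on the installed jieba dictionary):
#   JiebaTokenizer().tokenize('自然语言处理')  # e.g. ['自然语言', '处理']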
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tokenizer/jieba_tokenizer.py | jieba_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 11:34 上午
from kashgari.tokenizer.base_tokenizer import Tokenizer
from kashgari.tokenizer.bert_tokenizer import BertTokenizer
from kashgari.tokenizer.jieba_tokenizer import JiebaTokenizer
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tokenizer/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 2019-05-22 11:21
import os
import json
import logging
import warnings
import pathlib
from typing import Dict, Any, List, Optional, Union, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
import kashgari
from kashgari import utils
from kashgari.embeddings import BareEmbedding
from kashgari.embeddings.base_embedding import Embedding
L = keras.layers
class BaseModel(object):
"""Base Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def info(self):
model_json_str = self.tf_model.to_json()
return {
'config': {
'hyper_parameters': self.hyper_parameters,
},
'tf_model': json.loads(model_json_str),
'embedding': self.embedding.info(),
'class_name': self.__class__.__name__,
'module': self.__class__.__module__,
'tf_version': tf.__version__,
'kashgari_version': kashgari.__version__
}
@property
def task(self):
return self.embedding.task
@property
def token2idx(self) -> Dict[str, int]:
return self.embedding.token2idx
@property
def label2idx(self) -> Dict[str, int]:
return self.embedding.label2idx
@property
def pre_processor(self):
        """Deprecated. Use `self.processor` instead."""
        warnings.warn("The 'pre_processor' property is deprecated, "
                      "use 'processor' instead", DeprecationWarning, 2)
return self.embedding.processor
@property
def processor(self):
return self.embedding.processor
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
"""
Args:
embedding: model embedding
hyper_parameters: a dict of hyper_parameters.
Examples:
            You could customize hyper_parameters like this::
# get default hyper_parameters
hyper_parameters = BLSTMModel.get_default_hyper_parameters()
# change lstm hidden unit to 12
hyper_parameters['layer_blstm']['units'] = 12
# init new model with customized hyper_parameters
labeling_model = BLSTMModel(hyper_parameters=hyper_parameters)
labeling_model.fit(x, y)
"""
if embedding is None:
self.embedding = BareEmbedding(task=self.__task__)
else:
self.embedding = embedding
self.tf_model: keras.Model = None
self.hyper_parameters = self.get_default_hyper_parameters()
self.model_info = {}
if hyper_parameters:
self.hyper_parameters.update(hyper_parameters)
def build_model(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build model with corpus
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
self.build_model_arc()
self.compile_model()
def build_multi_gpu_model(self,
gpus: int,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
cpu_merge: bool = True,
cpu_relocation: bool = False,
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build multi-GPU model with corpus
Args:
gpus: Integer >= 2, number of on GPUs on which to create model replicas.
cpu_merge: A boolean value to identify whether to force merging model weights
under the scope of the CPU or not.
cpu_relocation: A boolean value to identify whether to create the model's weights
under the scope of the CPU. If the model is not defined under any preceding device
scope, you can still rescue it by activating this option.
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
with utils.custom_object_scope():
self.build_model_arc()
self.tf_model = tf.keras.utils.multi_gpu_model(self.tf_model,
gpus,
cpu_merge=cpu_merge,
cpu_relocation=cpu_relocation)
self.compile_model()
def build_tpu_model(self, strategy: tf.contrib.distribute.TPUStrategy,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build TPU model with corpus
Args:
strategy: `TPUDistributionStrategy`. The strategy to use for replicating model
across multiple TPU cores.
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
with utils.custom_object_scope():
self.build_model_arc()
self.tf_model = tf.contrib.tpu.keras_to_tpu_model(self.tf_model, strategy=strategy)
self.compile_model(optimizer=tf.train.AdamOptimizer())
def get_data_generator(self,
x_data,
y_data,
batch_size: int = 64,
shuffle: bool = True):
"""
data generator for fit_generator
Args:
x_data: Array of feature data (if the model has a single input),
or tuple of feature data array (if the model has multiple inputs)
y_data: Array of label data
batch_size: Number of samples per gradient update, default to 64.
shuffle:
Returns:
data generator
"""
index_list = np.arange(len(x_data))
page_count = len(x_data) // batch_size + 1
while True:
if shuffle:
np.random.shuffle(index_list)
for page in range(page_count):
start_index = page * batch_size
end_index = start_index + batch_size
target_index = index_list[start_index: end_index]
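                # `page_count` is one larger than needed when the dataset size is an
                # exact multiple of `batch_size`; the resulting empty slice is caught
                # below and replaced with the first `batch_size` samples.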
if len(target_index) == 0:
target_index = index_list[0: batch_size]
x_tensor = self.embedding.process_x_dataset(x_data,
target_index)
y_tensor = self.embedding.process_y_dataset(y_data,
target_index)
yield (x_tensor, y_tensor)
def fit(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None,
batch_size: int = 64,
epochs: int = 5,
callbacks: List[keras.callbacks.Callback] = None,
fit_kwargs: Dict = None,
shuffle: bool = True):
"""
Trains the model for a given number of epochs with fit_generator (iterations on a dataset).
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
batch_size: Number of samples per gradient update, default to 64.
epochs: Integer. Number of epochs to train the model. default 5.
callbacks:
            fit_kwargs: additional arguments passed to ``fit_generator()`` function from
``tensorflow.keras.Model``
- https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#fit_generator
shuffle:
Returns:
"""
self.build_model(x_train, y_train, x_validate, y_validate)
train_generator = self.get_data_generator(x_train,
y_train,
batch_size,
shuffle)
if fit_kwargs is None:
fit_kwargs = {}
validation_generator = None
validation_steps = None
if x_validate:
validation_generator = self.get_data_generator(x_validate,
y_validate,
batch_size,
shuffle)
if isinstance(x_validate, tuple):
validation_steps = len(x_validate[0]) // batch_size + 1
else:
validation_steps = len(x_validate) // batch_size + 1
if isinstance(x_train, tuple):
steps_per_epoch = len(x_train[0]) // batch_size + 1
else:
steps_per_epoch = len(x_train) // batch_size + 1
with utils.custom_object_scope():
return self.tf_model.fit_generator(train_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=callbacks,
**fit_kwargs)
def fit_without_generator(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None,
batch_size: int = 64,
epochs: int = 5,
callbacks: List[keras.callbacks.Callback] = None,
fit_kwargs: Dict = None):
"""
Trains the model for a given number of epochs (iterations on a dataset).
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
batch_size: Number of samples per gradient update, default to 64.
epochs: Integer. Number of epochs to train the model. default 5.
callbacks:
            fit_kwargs: additional arguments passed to ``fit()`` function from
                ``tensorflow.keras.Model``
                - https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#fit
Returns:
"""
self.build_model(x_train, y_train, x_validate, y_validate)
tensor_x = self.embedding.process_x_dataset(x_train)
tensor_y = self.embedding.process_y_dataset(y_train)
validation_data = None
if x_validate is not None:
tensor_valid_x = self.embedding.process_x_dataset(x_validate)
tensor_valid_y = self.embedding.process_y_dataset(y_validate)
validation_data = (tensor_valid_x, tensor_valid_y)
if fit_kwargs is None:
fit_kwargs = {}
if callbacks and 'callbacks' not in fit_kwargs:
fit_kwargs['callbacks'] = callbacks
with utils.custom_object_scope():
return self.tf_model.fit(tensor_x, tensor_y,
validation_data=validation_data,
epochs=epochs,
batch_size=batch_size,
**fit_kwargs)
def compile_model(self, **kwargs):
"""Configures the model for training.
Using ``compile()`` function of ``tf.keras.Model`` -
https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#compile
Args:
**kwargs: arguments passed to ``compile()`` function of ``tf.keras.Model``
Defaults:
- loss: ``categorical_crossentropy``
- optimizer: ``adam``
- metrics: ``['accuracy']``
"""
if kwargs.get('loss') is None:
kwargs['loss'] = 'categorical_crossentropy'
if kwargs.get('optimizer') is None:
kwargs['optimizer'] = 'adam'
if kwargs.get('metrics') is None:
kwargs['metrics'] = ['accuracy']
self.tf_model.compile(**kwargs)
if not kashgari.config.disable_auto_summary:
self.tf_model.summary()
def predict(self,
x_data,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
if predict_kwargs is None:
predict_kwargs = {}
with utils.custom_object_scope():
if isinstance(x_data, tuple):
lengths = [len(sen) for sen in x_data[0]]
else:
lengths = [len(sen) for sen in x_data]
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
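            # Scoring tasks keep the raw regression output; other tasks take the argmax
            # over the label dimension and map indices back to label strings below.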
if self.task == 'scoring':
t_pred = pred
else:
t_pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(t_pred,
lengths)
if debug_info:
print('input: {}'.format(tensor))
print('output: {}'.format(pred))
print('output argmax: {}'.format(t_pred))
return res
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
debug_info=False) -> Tuple[float, float, Dict]:
"""
Evaluate model
Args:
x_data:
y_data:
batch_size:
digits:
debug_info:
Returns:
"""
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def save(self, model_path: str):
"""
Save model
Args:
model_path:
Returns:
"""
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
with open(os.path.join(model_path, 'model_info.json'), 'w') as f:
f.write(json.dumps(self.info(), indent=2, ensure_ascii=True))
f.close()
self.tf_model.save_weights(os.path.join(model_path, 'model_weights.h5'))
logging.info('model saved to {}'.format(os.path.abspath(model_path)))
if __name__ == "__main__":
from kashgari.tasks.labeling import CNN_LSTM_Model
from kashgari.corpus import ChineseDailyNerCorpus
train_x, train_y = ChineseDailyNerCorpus.load_data('valid')
model = CNN_LSTM_Model()
model.build_model(train_x[:100], train_y[:100])
r = model.predict_entities(train_x[:5])
model.save('./res')
import pprint
pprint.pprint(r)
model.evaluate(train_x[:20], train_y[:20])
print("Hello world")
print(model.predict(train_x[:20]))
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-20 11:34
from kashgari.tasks import labeling
from kashgari.tasks import classification
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 2019-05-20 13:07
from typing import Dict, Any, Tuple
import random
import logging
from seqeval.metrics import classification_report
from seqeval.metrics.sequence_labeling import get_entities
from kashgari.tasks.base_model import BaseModel
class BaseLabelingModel(BaseModel):
"""Base Sequence Labeling Model"""
__task__ = 'labeling'
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def predict_entities(self,
x_data,
batch_size=None,
join_chunk=' ',
debug_info=False,
predict_kwargs: Dict = None):
"""Gets entities from sequence.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
            join_chunk: str used to join the tokens of each entity chunk, or False to keep the raw token list.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
list: list of entity.
"""
if isinstance(x_data, tuple):
text_seq = x_data[0]
else:
text_seq = x_data
res = self.predict(x_data, batch_size, debug_info, predict_kwargs)
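        # seqeval's `get_entities` turns each BIO/BIOES tag sequence into
        # (entity_type, start_index, end_index) chunks.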
new_res = [get_entities(seq) for seq in res]
final_res = []
for index, seq in enumerate(new_res):
seq_data = []
for entity in seq:
if join_chunk is False:
                    value = text_seq[index][entity[1]:entity[2] + 1]
else:
value = join_chunk.join(text_seq[index][entity[1]:entity[2] + 1])
seq_data.append({
"entity": entity[0],
"start": entity[1],
"end": entity[2],
"value": value,
})
final_res.append({
'text': join_chunk.join(text_seq[index]),
'text_raw': text_seq[index],
'labels': seq_data
})
return final_res
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
debug_info=False) -> Tuple[float, float, Dict]:
"""
Build a text report showing the main classification metrics.
Args:
x_data:
y_data:
batch_size:
digits:
debug_info:
Returns:
"""
y_pred = self.predict(x_data, batch_size=batch_size)
y_true = [seq[:len(y_pred[index])] for index, seq in enumerate(y_data)]
new_y_pred = []
for x in y_pred:
new_y_pred.append([str(i) for i in x])
new_y_true = []
for x in y_true:
new_y_true.append([str(i) for i in x])
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y_true : {}'.format(y_true[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
        report = classification_report(y_true, y_pred, digits=digits)
        print(report)
        return report
def build_model_arc(self):
raise NotImplementedError
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
from kashgari.tasks.labeling import BiLSTM_Model
from kashgari.corpus import ChineseDailyNerCorpus
from kashgari.utils import load_model
train_x, train_y = ChineseDailyNerCorpus.load_data('train', shuffle=False)
valid_x, valid_y = ChineseDailyNerCorpus.load_data('valid')
train_x, train_y = train_x[:5120], train_y[:5120]
model = load_model('/Users/brikerman/Desktop/blstm_model')
# model.build_model(train_x[:100], train_y[:100])
# model.fit(train_x[:1000], train_y[:1000], epochs=10)
# model.evaluate(train_x[:20], train_y[:20])
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/labeling/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: experimental.py
# time: 2019-05-22 19:35
from typing import Dict, Any
from tensorflow import keras
import kashgari
from kashgari.tasks.labeling.base_model import BaseLabelingModel
from kashgari.layers import L
from keras_self_attention import SeqSelfAttention
class BLSTMAttentionModel(BaseLabelingModel):
"""Bidirectional LSTM Self Attention Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 64,
'return_sequences': True
},
'layer_self_attention': {
'attention_activation': 'sigmoid'
},
'layer_dropout': {
'rate': 0.5
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
build model architectural
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_self_attention = SeqSelfAttention(**config['layer_self_attention'],
name='layer_self_attention')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_self_attention(tensor)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
# Register custom layer
kashgari.custom_objects['SeqSelfAttention'] = SeqSelfAttention
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/labeling/experimental.py | experimental.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-20 11:34
from kashgari.tasks.labeling.models import CNN_LSTM_Model
from kashgari.tasks.labeling.models import BiLSTM_Model
from kashgari.tasks.labeling.models import BiLSTM_CRF_Model
from kashgari.tasks.labeling.models import BiGRU_Model
from kashgari.tasks.labeling.models import BiGRU_CRF_Model
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/labeling/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 2019-05-20 11:13
import logging
from typing import Dict, Any
from tensorflow import keras
from kashgari.tasks.labeling.base_model import BaseLabelingModel
from kashgari.layers import L
from kashgari.layers.crf import CRF
from kashgari.utils import custom_objects
custom_objects['CRF'] = CRF
class BiLSTM_Model(BaseLabelingModel):
"""Bidirectional LSTM Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
build model architectural
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
class BiLSTM_CRF_Model(BaseLabelingModel):
"""Bidirectional LSTM CRF Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 128,
'return_sequences': True
},
'layer_dense': {
'units': 64,
'activation': 'tanh'
}
}
def build_model_arc(self):
"""
build model architectural
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_dense = L.Dense(**config['layer_dense'], name='layer_dense')
layer_crf_dense = L.Dense(output_dim, name='layer_crf_dense')
layer_crf = CRF(output_dim, name='layer_crf')
tensor = layer_blstm(embed_model.output)
tensor = layer_dense(tensor)
tensor = layer_crf_dense(tensor)
output_tensor = layer_crf(tensor)
self.layer_crf = layer_crf
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
def compile_model(self, **kwargs):
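        # The CRF layer supplies its own negative log-likelihood loss and a
        # Viterbi-decoded accuracy metric, used unless explicitly overridden.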
if kwargs.get('loss') is None:
kwargs['loss'] = self.layer_crf.loss
if kwargs.get('metrics') is None:
kwargs['metrics'] = [self.layer_crf.viterbi_accuracy]
super(BiLSTM_CRF_Model, self).compile_model(**kwargs)
class BiGRU_Model(BaseLabelingModel):
"""Bidirectional GRU Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_bgru': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
build model architectural
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.GRU(**config['layer_bgru']),
name='layer_bgru')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
class BiGRU_CRF_Model(BaseLabelingModel):
"""Bidirectional GRU CRF Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_bgru': {
'units': 128,
'return_sequences': True
},
'layer_dense': {
'units': 64,
'activation': 'tanh'
}
}
def build_model_arc(self):
"""
build model architectural
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.GRU(**config['layer_bgru']),
name='layer_bgru')
layer_dense = L.Dense(**config['layer_dense'], name='layer_dense')
layer_crf_dense = L.Dense(output_dim, name='layer_crf_dense')
layer_crf = CRF(output_dim, name='layer_crf')
tensor = layer_blstm(embed_model.output)
tensor = layer_dense(tensor)
tensor = layer_crf_dense(tensor)
output_tensor = layer_crf(tensor)
self.layer_crf = layer_crf
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = self.layer_crf.loss
if kwargs.get('metrics') is None:
kwargs['metrics'] = [self.layer_crf.viterbi_accuracy]
super(BiGRU_CRF_Model, self).compile_model(**kwargs)
class CNN_LSTM_Model(BaseLabelingModel):
"""CNN LSTM Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_conv': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'layer_lstm': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
build model architectural
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_conv = L.Conv1D(**config['layer_conv'],
name='layer_conv')
layer_lstm = L.LSTM(**config['layer_lstm'],
name='layer_lstm')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_conv(embed_model.output)
tensor = layer_lstm(tensor)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
from kashgari.corpus import ChineseDailyNerCorpus
valid_x, valid_y = ChineseDailyNerCorpus.load_data('train')
model = BiLSTM_CRF_Model()
model.fit(valid_x, valid_y, epochs=50, batch_size=64)
model.evaluate(valid_x, valid_y)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/labeling/models.py | models.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_classification_model.py
# time: 2019-05-22 11:23
import random
import logging
import kashgari
from typing import Dict, Any, Tuple, Optional, List
from kashgari.tasks.base_model import BaseModel, BareEmbedding
from kashgari.embeddings.base_embedding import Embedding
from sklearn import metrics
class BaseClassificationModel(BaseModel):
__task__ = 'classification'
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
super(BaseClassificationModel, self).__init__(embedding, hyper_parameters)
        if hyper_parameters is None and \
                getattr(self.embedding.processor, 'multi_label') is True:
last_layer_name = list(self.hyper_parameters.keys())[-1]
self.hyper_parameters[last_layer_name]['activation'] = 'sigmoid'
logging.warning("Activation Layer's activate function changed to sigmoid for"
" multi-label classification question")
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None and self.embedding.processor.multi_label:
kwargs['loss'] = 'binary_crossentropy'
super(BaseClassificationModel, self).compile_model(**kwargs)
def predict(self,
x_data,
batch_size=32,
multi_label_threshold: float = 0.5,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
multi_label_threshold:
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
        if predict_kwargs is None:
            predict_kwargs = {}
        with kashgari.utils.custom_object_scope():
            tensor = self.embedding.process_x_dataset(x_data)
            pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
if self.embedding.processor.multi_label:
if debug_info:
logging.info('raw output: {}'.format(pred))
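                # Multi-label case: threshold each class probability independently
                # instead of taking a single argmax.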
pred[pred >= multi_label_threshold] = 1
pred[pred < multi_label_threshold] = 0
else:
pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(pred)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return res
def predict_top_k_class(self,
x_data,
top_k=5,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None) -> List[Dict]:
"""
Generates output predictions with confidence for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
top_k: int
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
single-label classification:
[
{
"label": "chat",
"confidence": 0.5801531,
"candidates": [
{ "label": "cookbook", "confidence": 0.1886314 },
{ "label": "video", "confidence": 0.13805099 },
{ "label": "health", "confidence": 0.013852648 },
{ "label": "translation", "confidence": 0.012913573 }
]
}
]
multi-label classification:
[
{
"candidates": [
{ "confidence": 0.9959336, "label": "toxic" },
{ "confidence": 0.9358089, "label": "obscene" },
{ "confidence": 0.6882098, "label": "insult" },
{ "confidence": 0.13540423, "label": "severe_toxic" },
{ "confidence": 0.017219543, "label": "identity_hate" }
]
}
]
"""
if predict_kwargs is None:
predict_kwargs = {}
with kashgari.utils.custom_object_scope():
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
new_results = []
for sample_prob in pred:
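                # Pair each label with its predicted probability (assuming label2idx keys
                # follow index order, as built by the processor) and rank by confidence.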
sample_res = zip(self.label2idx.keys(), sample_prob)
sample_res = sorted(sample_res, key=lambda k: k[1], reverse=True)
data = {}
for label, confidence in sample_res[:top_k]:
if 'candidates' not in data:
if self.embedding.processor.multi_label:
data['candidates'] = []
else:
data['label'] = label
data['confidence'] = confidence
data['candidates'] = []
continue
data['candidates'].append({
'label': label,
'confidence': confidence
})
new_results.append(data)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return new_results
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
output_dict=False,
debug_info=False) -> Optional[Tuple[float, float, Dict]]:
y_pred = self.predict(x_data, batch_size=batch_size)
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y : {}'.format(y_data[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
if self.processor.multi_label:
y_pred_b = self.processor.multi_label_binarizer.fit_transform(y_pred)
y_true_b = self.processor.multi_label_binarizer.fit_transform(y_data)
            report = metrics.classification_report(y_true_b,
                                                   y_pred_b,
target_names=self.processor.multi_label_binarizer.classes_,
output_dict=output_dict,
digits=digits)
else:
report = metrics.classification_report(y_data,
y_pred,
output_dict=output_dict,
digits=digits)
if not output_dict:
print(report)
else:
return report
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/classification/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-22 12:40
from kashgari.tasks.classification.models import BiLSTM_Model
from kashgari.tasks.classification.models import BiGRU_Model
from kashgari.tasks.classification.models import CNN_Model
from kashgari.tasks.classification.models import CNN_LSTM_Model
from kashgari.tasks.classification.models import CNN_GRU_Model
from kashgari.tasks.classification.models import AVCNN_Model
from kashgari.tasks.classification.models import KMax_CNN_Model
from kashgari.tasks.classification.models import R_CNN_Model
from kashgari.tasks.classification.models import AVRNN_Model
from kashgari.tasks.classification.models import Dropout_BiGRU_Model
from kashgari.tasks.classification.models import Dropout_AVRNN_Model
from kashgari.tasks.classification.dpcnn_model import DPCNN_Model
BLSTMModel = BiLSTM_Model
BGRUModel = BiGRU_Model
CNNModel = CNN_Model
CNNLSTMModel = CNN_LSTM_Model
CNNGRUModel = CNN_GRU_Model
AVCNNModel = AVCNN_Model
KMaxCNNModel = KMax_CNN_Model
RCNNModel = R_CNN_Model
AVRNNModel = AVRNN_Model
DropoutBGRUModel = Dropout_BiGRU_Model
DropoutAVRNNModel = Dropout_AVRNN_Model
DPCNN = DPCNN_Model
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/classification/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 2019-05-22 11:26
import logging
import tensorflow as tf
from typing import Dict, Any
from kashgari.layers import L, AttentionWeightedAverageLayer, KMaxPoolingLayer
from kashgari.tasks.classification.base_model import BaseClassificationModel
class BiLSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_lstm(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_gru': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_gru = L.Bidirectional(L.GRU(**config['layer_bi_gru']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_gru(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv1d_layer': {
'filters': 128,
'kernel_size': 5,
'activation': 'relu'
},
'max_pool_layer': {},
'dense_layer': {
'units': 64,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
# build model structure in sequent way
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv1d_layer']))
layers_seq.append(L.GlobalMaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.Dense(**config['dense_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class CNN_LSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'lstm_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.LSTM(**config['lstm_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class CNN_GRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'gru_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.GRU(**config['gru_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class AVCNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'conv_0': {
'filters': 300,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 300,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 300,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 300,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
# ---
'attn_0': {},
'avg_0': {},
'maxpool_0': {},
# ---
'maxpool_1': {},
'attn_1': {},
'avg_1': {},
# ---
'maxpool_2': {},
'attn_2': {},
'avg_2': {},
# ---
'maxpool_3': {},
'attn_3': {},
'avg_3': {},
# ---
'v_col3': {
# 'mode': 'concat',
'axis': 1
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.7
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_view = L.Concatenate(**config['v_col3'])
layer_allviews = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
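        # Each convolution branch is summarised by three "views" (global max pooling,
        # attention-weighted average and global average pooling); same-kind views are
        # concatenated across branches, then all views are merged before the dense head.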
tensors_matrix_sensor = []
for tensor_conv in tensors_conv:
            tensor_sensors = [layer_sensor(tensor_conv) for layer_sensor in layers_sensor]
# tensor_sensors.append(L.GlobalMaxPooling1D()(tensor_conv))
# tensor_sensors.append(AttentionWeightedAverageLayer()(tensor_conv))
# tensor_sensors.append(L.GlobalAveragePooling1D()(tensor_conv))
tensors_matrix_sensor.append(tensor_sensors)
tensors_views = [layer_view(list(tensors)) for tensors in zip(*tensors_matrix_sensor)]
tensor = layer_allviews(tensors_views)
# tensors_v_cols = [L.concatenate(tensors, **config['v_col3']) for tensors
# in zip(*tensors_matrix_sensor)]
# tensor = L.concatenate(tensors_v_cols, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class KMax_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'conv_0': {
'filters': 180,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 180,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 180,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 180,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'maxpool_i4': {
'k': 3
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.6
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = [KMaxPoolingLayer(**config['maxpool_i4']),
L.Flatten()]
layer_concat = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
tensors_sensor = []
for tensor_conv in tensors_conv:
tensor_sensor = tensor_conv
for layer_sensor in layers_sensor:
tensor_sensor = layer_sensor(tensor_sensor)
tensors_sensor.append(tensor_sensor)
tensor = layer_concat(tensors_sensor)
# tensor = L.concatenate(tensors_sensor, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class R_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'conv_0': {
'filters': 128,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu',
'strides': 1
},
'maxpool': {},
'attn': {},
'average': {},
'concat': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 120,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rcnn_seq = []
layers_rcnn_seq.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rcnn_seq.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rcnn_seq.append(L.Conv1D(**config['conv_0']))
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_concat = L.Concatenate(**config['concat'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_rcnn_seq:
tensor = layer(tensor)
tensors_sensor = [layer(tensor) for layer in layers_sensor]
tensor_output = layer_concat(tensors_sensor)
# tensor_output = L.concatenate(tensor_sensors, **config['concat'])
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 60,
'return_sequences': True
},
'rnn_1': {
'units': 60,
'return_sequences': True
},
'concat_rnn': {
'axis': 2
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn0 = []
layers_rnn0.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn0.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layer_bi_rnn1 = L.Bidirectional(L.GRU(**config['rnn_1']))
layer_concat = L.Concatenate(**config['concat_rnn'])
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn0:
tensor_rnn = layer(tensor_rnn)
tensor_concat = layer_concat([tensor_rnn, layer_bi_rnn1(tensor_rnn)])
tensor_sensors = [layer(tensor_concat) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.15
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'dropout_rnn': {
'rate': 0.35
},
'rnn_1': {
'units': 64,
'return_sequences': True
},
'last': {},
'maxpool': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 72,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.Dropout(**config['dropout_rnn']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 56,
'return_sequences': True
},
'rnn_dropout': {
'rate': 0.3
},
'rnn_1': {
'units': 56,
'return_sequences': True
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout_0': {
'rate': 0.5
},
'dense': {
'units': 128,
'activation': 'relu'
},
'dropout_1': {
'rate': 0.25
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.SpatialDropout1D(**config['rnn_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout_0']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dropout(**config['dropout_1']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
if __name__ == "__main__":
print(BiLSTM_Model.get_default_hyper_parameters())
logging.basicConfig(level=logging.DEBUG)
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
import kashgari
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.embeddings import BareEmbedding
processor = ClassificationProcessor(multi_label=False)
embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=30, processor=processor)
m = BiLSTM_Model(embed)
# m.build_model(x, y)
m.fit(x, y, epochs=2)
print(m.predict(x[:10]))
# m.evaluate(x, y)
print(m.predict_top_k_class(x[:10]))
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/classification/models.py | models.py |
# encoding: utf-8
# author: Alex
# contact: ialexwwang@gmail.com
# version: 0.1
# license: Apache Licence
# file: dpcnn_model.py
# time: 2019-07-02 19:15
# Reference:
# https://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf
# https://github.com/Cheneng/DPCNN
# https://github.com/miracleyoo/DPCNN-TextCNN-Pytorch-Inception
# https://www.kaggle.com/michaelsnell/conv1d-dpcnn-in-keras
from math import log2, floor
from typing import Dict, Any
import tensorflow as tf
from kashgari.layers import L, KMaxPoolingLayer
from kashgari.tasks.classification.base_model import BaseClassificationModel
class DPCNN_Model(BaseClassificationModel):
    '''
    This implementation of DPCNN requires an explicitly declared sequence length,
    so input sequences should be padded or truncated to that fixed length in advance.
    '''
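    # Minimal usage sketch (the embedding type and the parameter values below are
    # illustrative assumptions, not requirements of this class):
    #
    #   import kashgari
    #   from kashgari.embeddings import BareEmbedding
    #
    #   embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=128)
    #   model = DPCNN_Model(embed)
    #   model.fit(train_x, train_y, epochs=5)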
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
pool_type = 'max'
filters = 250
activation = 'linear'
return {
'region_embedding': {
'filters': filters,
'kernel_size': 3,
'strides': 1,
'padding': 'same',
'activation': activation,
'name': 'region_embedding',
},
'region_dropout': {
'rate': 0.2,
},
'conv_block': {
'filters': filters,
'kernel_size': 3,
'activation': activation,
'shortcut': True,
},
'resnet_block': {
'filters': filters,
'kernel_size': 3,
'activation': activation,
'shortcut': True,
'pool_type': pool_type,
'sorted': True,
},
'dense': {
'units': 256,
'activation': activation,
},
'dropout': {
'rate': 0.5,
},
'activation': {
'activation': 'softmax',
}
}
def downsample(self, inputs, pool_type: str = 'max',
sorted: bool = True, stage: int = 1): # noqa: A002
layers_pool = []
if pool_type == 'max':
layers_pool.append(
L.MaxPooling1D(pool_size=3,
strides=2,
padding='same',
name=f'pool_{stage}'))
elif pool_type == 'k_max':
k = int(inputs.shape[1].value / 2)
layers_pool.append(
KMaxPoolingLayer(k=k,
sorted=sorted,
name=f'pool_{stage}'))
elif pool_type == 'conv':
layers_pool.append(
L.Conv1D(filters=inputs.shape[-1].value,
kernel_size=3,
strides=2,
padding='same',
name=f'pool_{stage}'))
layers_pool.append(
L.BatchNormalization())
elif pool_type is None:
layers_pool = []
else:
raise ValueError(f'unsupported pooling type `{pool_type}`!')
tensor_out = inputs
for layer in layers_pool:
tensor_out = layer(tensor_out)
return tensor_out
def conv_block(self, inputs, filters: int, kernel_size: int = 3,
activation: str = 'linear', shortcut: bool = True):
layers_conv_unit = []
layers_conv_unit.append(
L.BatchNormalization())
layers_conv_unit.append(
L.PReLU())
layers_conv_unit.append(
L.Conv1D(filters=filters,
kernel_size=kernel_size,
strides=1,
padding='same',
activation=activation))
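        # Note: multiplying the list reuses the very same layer instances, so the two
        # convolution units in this block share their weights rather than being
        # independent copies.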
layers_conv_block = layers_conv_unit * 2
tensor_out = inputs
for layer in layers_conv_block:
tensor_out = layer(tensor_out)
if shortcut:
tensor_out = L.Add()([inputs, tensor_out])
return tensor_out
def resnet_block(self, inputs, filters: int, kernel_size: int = 3,
activation: str = 'linear', shortcut: bool = True,
pool_type: str = 'max', sorted: bool = True, stage: int = 1): # noqa: A002
tensor_pool = self.downsample(inputs, pool_type=pool_type, sorted=sorted, stage=stage)
tensor_out = self.conv_block(tensor_pool, filters=filters, kernel_size=kernel_size,
activation=activation, shortcut=shortcut)
return tensor_out
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_region = [
L.Conv1D(**config['region_embedding']),
L.BatchNormalization(),
L.PReLU(),
L.Dropout(**config['region_dropout'])
]
layers_main = [
L.GlobalMaxPooling1D(),
L.Dense(**config['dense']),
L.BatchNormalization(),
L.PReLU(),
L.Dropout(**config['dropout']),
L.Dense(output_dim, **config['activation'])
]
tensor_out = embed_model.output
# build region tensors
for layer in layers_region:
tensor_out = layer(tensor_out)
# build the base pyramid layer
tensor_out = self.conv_block(tensor_out, **config['conv_block'])
# build the above pyramid layers while `steps > 2`
seq_len = tensor_out.shape[1].value
if seq_len is None:
raise ValueError('`sequence_length` should be explicitly assigned, but it is `None`.')
for i in range(floor(log2(seq_len)) - 2):
tensor_out = self.resnet_block(tensor_out, stage=i + 1,
**config['resnet_block'])
for layer in layers_main:
tensor_out = layer(tensor_out)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_out)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/classification/dpcnn_model.py | dpcnn_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 11:36 上午
from typing import Callable
from typing import Dict, Any
import numpy as np
from sklearn import metrics
from kashgari.tasks.base_model import BaseModel
class BaseScoringModel(BaseModel):
"""Base Sequence Labeling Model"""
__task__ = 'scoring'
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = 'mse'
if kwargs.get('optimizer') is None:
kwargs['optimizer'] = 'rmsprop'
if kwargs.get('metrics') is None:
kwargs['metrics'] = ['mae']
super(BaseScoringModel, self).compile_model(**kwargs)
def evaluate(self,
x_data,
y_data,
batch_size=None,
should_round: bool = False,
round_func: Callable = None,
digits=4,
debug_info=False) -> Dict:
"""
Build a text report showing the main classification metrics.
Args:
x_data:
y_data:
batch_size:
should_round:
round_func:
digits:
debug_info:
Returns:
"""
y_pred = self.predict(x_data, batch_size=batch_size)
if should_round:
if round_func is None:
round_func = np.round
            if self.processor.output_dim != 1:
                raise ValueError('Evaluating with a round function only accepts 1D output')
y_pred = [round_func(i) for i in y_pred]
report = metrics.classification_report(y_data,
y_pred,
digits=digits)
report_dic = metrics.classification_report(y_data,
y_pred,
output_dict=True,
digits=digits)
print(report)
else:
mean_squared_error = metrics.mean_squared_error(y_data, y_pred)
r2_score = metrics.r2_score(y_data, y_pred)
report_dic = {
'mean_squared_error': mean_squared_error,
'r2_score': r2_score
}
print(f"mean_squared_error : {mean_squared_error}\n"
f"r2_score : {r2_score}")
return report_dic
if __name__ == "__main__":
pass
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/scoring/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 11:36 上午
from kashgari.tasks.scoring.models import BiLSTM_Model
if __name__ == "__main__":
pass
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/scoring/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 11:38 上午
import logging
from typing import Dict, Any
from tensorflow import keras
from kashgari.tasks.scoring.base_model import BaseScoringModel
from kashgari.layers import L
class BiLSTM_Model(BaseScoringModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'linear'
}
}
def build_model_arc(self):
output_dim = self.processor.output_dim
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_lstm(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
if __name__ == "__main__":
from kashgari.corpus import SMP2018ECDTCorpus
import numpy as np
x, y = SMP2018ECDTCorpus.load_data('valid')
y = np.random.random((len(x), 4))
model = BiLSTM_Model()
model.fit(x, y)
print(model.predict(x[:10]))
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/scoring/models.py | models.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-20 17:40
import json
import logging
import pydoc
from typing import Union, List, Optional, Dict
import numpy as np
from tensorflow import keras
import kashgari
from kashgari.processors import ClassificationProcessor, LabelingProcessor, ScoringProcessor
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
class Embedding(object):
"""Base class for Embedding Model"""
def info(self) -> Dict:
return {
'processor': self.processor.info(),
'class_name': self.__class__.__name__,
'module': self.__class__.__module__,
'config': {
'sequence_length': self.sequence_length,
'embedding_size': self.embedding_size,
'task': self.task
},
'embed_model': json.loads(self.embed_model.to_json()),
}
@classmethod
def _load_saved_instance(cls,
config_dict: Dict,
model_path: str,
tf_model: keras.Model):
processor_info = config_dict['processor']
processor_class = pydoc.locate(f"{processor_info['module']}.{processor_info['class_name']}")
processor = processor_class(**processor_info['config'])
instance = cls(processor=processor,
from_saved_model=True, **config_dict['config'])
embed_model_json_str = json.dumps(config_dict['embed_model'])
instance.embed_model = keras.models.model_from_json(embed_model_json_str,
custom_objects=kashgari.custom_objects)
# Load Weights from model
for layer in instance.embed_model.layers:
layer.set_weights(tf_model.get_layer(layer.name).get_weights())
return instance
def __init__(self,
task: str = None,
sequence_length: Union[int, str] = 'auto',
embedding_size: int = 100,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
self.task = task
self.embedding_size = embedding_size
if processor is None:
if task == kashgari.CLASSIFICATION:
self.processor = ClassificationProcessor()
elif task == kashgari.LABELING:
self.processor = LabelingProcessor()
elif task == kashgari.SCORING:
self.processor = ScoringProcessor()
else:
raise ValueError('Need to set the processor param, value: {labeling, classification, scoring}')
else:
self.processor = processor
self.sequence_length: Union[int, str] = sequence_length
self.embed_model: Optional[keras.Model] = None
self._tokenizer = None
@property
def token_count(self) -> int:
"""
corpus token count
"""
return len(self.processor.token2idx)
@property
def sequence_length(self) -> Union[int, str]:
"""
model sequence length
"""
return self.processor.sequence_length
@property
def label2idx(self) -> Dict[str, int]:
"""
label to index dict
"""
return self.processor.label2idx
@property
def token2idx(self) -> Dict[str, int]:
"""
token to index dict
"""
return self.processor.token2idx
@property
def tokenizer(self):
if self._tokenizer:
return self._tokenizer
else:
raise ValueError('This embedding not support built-in tokenizer')
@sequence_length.setter
def sequence_length(self, val: Union[int, str]):
if isinstance(val, str):
if val == 'auto':
logging.warning("Sequence length will auto set at 95% of sequence length")
elif val == 'variable':
val = None
else:
raise ValueError("sequence_length must be an int or 'auto' or 'variable'")
self.processor.sequence_length = val
def _build_model(self, **kwargs):
raise NotImplementedError
def analyze_corpus(self,
x: List[List[str]],
y: Union[List[List[str]], List[str]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
self.processor.analyze_corpus(x, y)
if self.sequence_length == 'auto':
self.sequence_length = self.processor.dataset_info['RECOMMEND_LEN']
self._build_model()
    def embed_one(self, sentence: Union[List[str], List[int]]) -> np.ndarray:
"""
Convert one sentence to vector
Args:
sentence: target sentence, list of str
Returns:
vectorized sentence
"""
return self.embed([sentence])[0]
def embed(self,
sentence_list: Union[List[List[str]], List[List[int]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug info
Returns:
vectorized sentence list
"""
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
return self.processor.process_x_dataset(data, self.sequence_length, subset)
def process_y_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
"""
batch process labels data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
return self.processor.process_y_dataset(data, self.sequence_length, subset)
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
return self.processor.reverse_numerize_label_sequences(sequences, lengths=lengths)
def __repr__(self):
return f"<{self.__class__} seq_len: {self.sequence_length}>"
def __str__(self):
return self.__repr__()
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/base_embedding.py | base_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-26 17:40
import os
os.environ['TF_KERAS'] = '1'
import logging
from typing import Union, Optional, Any, List, Tuple
import numpy as np
import kashgari
import pathlib
from tensorflow.python.keras.utils import get_file
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
import keras_gpt_2 as gpt2
class GPT2Embedding(Embedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(GPT2Embedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
model_folder: str,
task: str = None,
sequence_length: Union[Tuple[int, ...], str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
model_folder:
sequence_length:
processor:
from_saved_model:
"""
super(GPT2Embedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
if isinstance(sequence_length, tuple):
if len(sequence_length) > 2:
                raise ValueError('GPT2 embedding accepts a `sequence_length` tuple with at most 2 elements')
else:
if not all([s == sequence_length[0] for s in sequence_length]):
                    raise ValueError('All elements of a `sequence_length` tuple must be equal')
if sequence_length == 'variable':
self.sequence_length = None
self.processor.token_pad = 'pad'
self.processor.token_unk = 'unk'
self.processor.token_bos = 'pad'
self.processor.token_eos = 'pad'
self.model_folder = model_folder
if not from_saved_model:
self._build_token2idx_from_gpt()
self._build_model()
def _build_token2idx_from_gpt(self):
encoder_path = os.path.join(self.model_folder, 'encoder.json')
vocab_path = os.path.join(self.model_folder, 'vocab.bpe')
bpe: gpt2.BytePairEncoding = gpt2.get_bpe_from_files(encoder_path, vocab_path)
token2idx = bpe.token_dict.copy()
self.processor.token2idx = token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None and self.sequence_length != 'auto':
config_path = os.path.join(self.model_folder, 'hparams.json')
checkpoint_path = os.path.join(self.model_folder, 'model.ckpt')
model = gpt2.load_trained_model_from_checkpoint(config_path,
checkpoint_path,
self.sequence_length)
if not kashgari.config.disable_auto_summary:
model.summary()
self.embed_model = model
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if len(self.processor.token2idx) == 0:
self._build_token2idx_from_gpt()
super(GPT2Embedding, self).analyze_corpus(x, y)
def embed(self,
sentence_list: Union[Tuple[List[List[str]], ...], List[List[str]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug log
Returns:
vectorized sentence list
"""
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: Union[Tuple[List[List[str]], ...], List[List[str]]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
x1 = None
if isinstance(data, tuple):
if len(data) == 2:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
x1 = self.processor.process_x_dataset(data[1], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data, self.sequence_length, subset)
if x1 is None:
x1 = np.zeros(x0.shape, dtype=np.int32)
return x0, x1
@classmethod
def load_data(cls, model_name):
"""
Download pretrained GPT-2 models
Args:
model_name: {117M, 345M}
Returns:
GPT-2 model folder
"""
model_folder: pathlib.Path = pathlib.Path(os.path.join(kashgari.macros.DATA_PATH,
'datasets',
f'gpt2-{model_name}'))
model_folder.mkdir(exist_ok=True, parents=True)
for filename in ['checkpoint', 'encoder.json', 'hparams.json', 'model.ckpt.data-00000-of-00001',
'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
url = "https://storage.googleapis.com/gpt-2/models/" + model_name + "/" + filename
get_file(os.path.join(f'gpt2-{model_name}', filename),
url,
cache_dir=kashgari.macros.DATA_PATH)
return str(model_folder)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
# bert_model_path = os.path.join(utils.get_project_path(), 'tests/test-data/bert')
model_folder = GPT2Embedding.load_data('117M')
print(model_folder)
b = GPT2Embedding(task=kashgari.CLASSIFICATION,
model_folder=model_folder,
sequence_length=12)
# from kashgari.corpus import SMP2018ECDTCorpus
# test_x, test_y = SMP2018ECDTCorpus.load_data('valid')
# b.analyze_corpus(test_x, test_y)
data1 = 'all work and no play makes'.split(' ')
r = b.embed([data1], True)
print(r)
print(r.shape)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/gpt_2_embedding.py | gpt_2_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bert_embedding_v2.py
# time: 10:03 上午
import os
os.environ['TF_KERAS'] = '1'
import json
import codecs
import logging
from typing import Union, Optional
from bert4keras.models import build_transformer_model
import kashgari
import tensorflow as tf
from kashgari.embeddings.bert_embedding import BERTEmbedding
from kashgari.layers import NonMaskingLayer
from kashgari.processors.base_processor import BaseProcessor
import keras_bert
class BERTEmbeddingV2(BERTEmbedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(BERTEmbedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
vacab_path: str,
config_path: str,
checkpoint_path: str,
bert_type: str = 'bert',
task: str = None,
sequence_length: Union[str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
"""
self.model_folder = ''
self.vacab_path = vacab_path
self.config_path = config_path
self.checkpoint_path = checkpoint_path
super(BERTEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
self.bert_type = bert_type
self.processor.token_pad = '[PAD]'
self.processor.token_unk = '[UNK]'
self.processor.token_bos = '[CLS]'
self.processor.token_eos = '[SEP]'
self.processor.add_bos_eos = True
if not from_saved_model:
self._build_token2idx_from_bert()
self._build_model()
def _build_token2idx_from_bert(self):
token2idx = {}
with codecs.open(self.vacab_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
self.bert_token2idx = token2idx
self._tokenizer = keras_bert.Tokenizer(token2idx)
self.processor.token2idx = self.bert_token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None:
seq_len = self.sequence_length
if isinstance(seq_len, tuple):
seq_len = seq_len[0]
if isinstance(seq_len, str):
logging.warning(f"Model will be built when sequence length is determined")
return
config_path = self.config_path
config = json.load(open(config_path))
if seq_len > config.get('max_position_embeddings'):
seq_len = config.get('max_position_embeddings')
logging.warning(f"Max seq length is {seq_len}")
bert_model = build_transformer_model(config_path=self.config_path,
checkpoint_path=self.checkpoint_path,
model=self.bert_type,
application='encoder',
return_keras_model=True)
self.embed_model = bert_model
self.embedding_size = int(bert_model.output.shape[-1])
output_features = NonMaskingLayer()(bert_model.output)
self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
if __name__ == "__main__":
# BERT_PATH = '/Users/brikerman/Desktop/nlp/language_models/bert/chinese_L-12_H-768_A-12'
model_folder = '/Users/brikerman/Desktop/nlp/language_models/albert_base'
checkpoint_path = os.path.join(model_folder, 'model.ckpt-best')
config_path = os.path.join(model_folder, 'albert_config.json')
vacab_path = os.path.join(model_folder, 'vocab_chinese.txt')
embed = BERTEmbeddingV2(vacab_path, config_path, checkpoint_path,
bert_type='albert',
task=kashgari.CLASSIFICATION,
sequence_length=100)
x = embed.embed_one(list('今天天气不错'))
print(x)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/bert_embedding_v2.py | bert_embedding_v2.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: stacked_embedding.py
# time: 2019-05-23 09:18
import json
import pydoc
from typing import Union, Optional, Tuple, List, Dict
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
import kashgari
from kashgari.embeddings.base_embedding import Embedding
from kashgari.layers import L
from kashgari.processors.base_processor import BaseProcessor
class StackedEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
@classmethod
def _load_saved_instance(cls,
config_dict: Dict,
model_path: str,
tf_model: keras.Model):
embeddings = []
for embed_info in config_dict['embeddings']:
embed_class = pydoc.locate(f"{embed_info['module']}.{embed_info['class_name']}")
embedding: Embedding = embed_class._load_saved_instance(embed_info,
model_path,
tf_model)
embeddings.append(embedding)
instance = cls(embeddings=embeddings,
from_saved_model=True)
embed_model_json_str = json.dumps(config_dict['embed_model'])
instance.embed_model = keras.models.model_from_json(embed_model_json_str,
custom_objects=kashgari.custom_objects)
# Load Weights from model
for layer in instance.embed_model.layers:
layer.set_weights(tf_model.get_layer(layer.name).get_weights())
return instance
def info(self):
info = super(StackedEmbedding, self).info()
info['embeddings'] = [embed.info() for embed in self.embeddings]
info['config'] = {}
return info
def __init__(self,
embeddings: List[Embedding],
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
embeddings:
processor:
"""
task = kashgari.CLASSIFICATION
if all(isinstance(embed.sequence_length, int) for embed in embeddings):
sequence_length = [embed.sequence_length for embed in embeddings]
else:
raise ValueError('Need to set sequence length for all embeddings while using stacked embedding')
super(StackedEmbedding, self).__init__(task=task,
sequence_length=sequence_length[0],
embedding_size=100,
processor=processor,
from_saved_model=from_saved_model)
self.embeddings = embeddings
self.processor = embeddings[0].processor
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
if self.embed_model is None and all(embed.embed_model is not None for embed in self.embeddings):
layer_concatenate = L.Concatenate(name='layer_concatenate')
inputs = []
for embed in self.embeddings:
inputs += embed.embed_model.inputs
# inputs = [embed.embed_model.inputs for embed in self.embeddings]
outputs = layer_concatenate([embed.embed_model.output for embed in self.embeddings])
self.embed_model = tf.keras.Model(inputs, outputs)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[str]], List[str]]):
for index in range(len(x)):
self.embeddings[index].analyze_corpus(x[index], y)
self._build_model()
def process_x_dataset(self,
data: Tuple[List[List[str]], ...],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
result = []
for index, dataset in enumerate(data):
x = self.embeddings[index].process_x_dataset(dataset, subset)
if isinstance(x, tuple):
result += list(x)
else:
result.append(x)
return tuple(result)
def process_y_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
return self.embeddings[0].process_y_dataset(data, subset)
if __name__ == "__main__":
pass
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/stacked_embedding.py | stacked_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-25 17:40
import os
os.environ['TF_KERAS'] = '1'
import codecs
import logging
from typing import Union, Optional, Any, List, Tuple
import numpy as np
import kashgari
import tensorflow as tf
from kashgari.layers import NonMaskingLayer
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
import keras_bert
class BERTEmbedding(Embedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(BERTEmbedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
model_folder: str,
layer_nums: int = 4,
trainable: bool = False,
task: str = None,
sequence_length: Union[str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
model_folder:
            layer_nums: number of layers whose outputs will be concatenated into a single tensor,
                        default `4`, i.e. the last 4 hidden layers, as suggested in the BERT paper
            trainable: whether the embedding layer is trainable, default `False`; set it to `True`
                       to fine-tune this embedding layer during training
sequence_length:
processor:
from_saved_model:
"""
self.trainable = trainable
# Do not need to train the whole bert model if just to use its feature output
self.training = False
self.layer_nums = layer_nums
if isinstance(sequence_length, tuple):
            raise ValueError('BERT embedding only accepts an `int` type `sequence_length`')
if sequence_length == 'variable':
            raise ValueError('BERT embedding only accepts sequences of equal length')
super(BERTEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
self.processor.token_pad = '[PAD]'
self.processor.token_unk = '[UNK]'
self.processor.token_bos = '[CLS]'
self.processor.token_eos = '[SEP]'
self.processor.add_bos_eos = True
self.model_folder = model_folder
if not from_saved_model:
self._build_token2idx_from_bert()
self._build_model()
def _build_token2idx_from_bert(self):
dict_path = os.path.join(self.model_folder, 'vocab.txt')
token2idx = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
self.bert_token2idx = token2idx
self._tokenizer = keras_bert.Tokenizer(token2idx)
self.processor.token2idx = self.bert_token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None:
seq_len = self.sequence_length
if isinstance(seq_len, tuple):
seq_len = seq_len[0]
if isinstance(seq_len, str):
logging.warning(f"Model will be built until sequence length is determined")
return
config_path = os.path.join(self.model_folder, 'bert_config.json')
check_point_path = os.path.join(self.model_folder, 'bert_model.ckpt')
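            # keras_bert concatenates the outputs of the last `layer_nums` transformer
            # layers, so the resulting embedding size is hidden_size * layer_nums
            # (e.g. 768 * 4 = 3072 for BERT-base with the default settings).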
bert_model = keras_bert.load_trained_model_from_checkpoint(config_path,
check_point_path,
seq_len=seq_len,
output_layer_num=self.layer_nums,
training=self.training,
trainable=self.trainable)
self._model = tf.keras.Model(bert_model.inputs, bert_model.output)
bert_seq_len = int(bert_model.output.shape[1])
if bert_seq_len < seq_len:
logging.warning(f"Sequence length limit set to {bert_seq_len} by pre-trained model")
self.sequence_length = bert_seq_len
self.embedding_size = int(bert_model.output.shape[-1])
output_features = NonMaskingLayer()(bert_model.output)
self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
logging.warning(f'seq_len: {self.sequence_length}')
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if len(self.processor.token2idx) == 0:
self._build_token2idx_from_bert()
super(BERTEmbedding, self).analyze_corpus(x, y)
def embed(self,
sentence_list: Union[Tuple[List[List[str]], ...], List[List[str]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug log
Returns:
vectorized sentence list
"""
if self.embed_model is None:
            raise ValueError('The embedding model must be built before embedding sentences')
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: Union[Tuple[List[List[str]], ...], List[List[str]]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
x1 = None
if isinstance(data, tuple):
if len(data) == 2:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
x1 = self.processor.process_x_dataset(data[1], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data, self.sequence_length, subset)
if x1 is None:
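            # The second input of the keras_bert model is the segment-id tensor;
            # for single-sentence input every position belongs to segment 0.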
x1 = np.zeros(x0.shape, dtype=np.int32)
return x0, x1
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
# bert_model_path = os.path.join(utils.get_project_path(), 'tests/test-data/bert')
b = BERTEmbedding(task=kashgari.CLASSIFICATION,
model_folder='/Users/brikerman/.kashgari/embedding/bert/chinese_L-12_H-768_A-12',
sequence_length=12)
from kashgari.corpus import SMP2018ECDTCorpus
test_x, test_y = SMP2018ECDTCorpus.load_data('valid')
b.analyze_corpus(test_x, test_y)
data1 = 'all work and no play makes'.split(' ')
data2 = '你 好 啊'.split(' ')
r = b.embed([data1], True)
tokens = b.process_x_dataset([['语', '言', '模', '型']])[0]
target_index = [101, 6427, 6241, 3563, 1798, 102]
target_index = target_index + [0] * (12 - len(target_index))
assert list(tokens[0]) == list(target_index)
print(tokens)
print(r)
print(r.shape)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/bert_embedding.py | bert_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: w2v_embedding.py
# time: 2019-05-20 17:32
import logging
from typing import Union, Optional, Dict, Any, List, Tuple
import numpy as np
from gensim.models import KeyedVectors
from tensorflow import keras
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
class WordEmbedding(Embedding):
"""Pre-trained word2vec embedding"""
def info(self):
info = super(WordEmbedding, self).info()
info['config'] = {
'w2v_path': self.w2v_path,
'w2v_kwargs': self.w2v_kwargs,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
w2v_path: str,
task: str = None,
w2v_kwargs: Dict[str, Any] = None,
sequence_length: Union[Tuple[int, ...], str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
w2v_path: word2vec file path
w2v_kwargs: params pass to the ``load_word2vec_format()`` function of ``gensim.models.KeyedVectors`` -
https://radimrehurek.com/gensim/models/keyedvectors.html#module-gensim.models.keyedvectors
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the sequence length is chosen
                to cover about 95% of the corpus sequences. With ``'variable'``, the model input shape is set to None
                so it can handle inputs of varying length, using the longest sequence in each batch as that batch's
                sequence length. With an integer, say ``50``, the input and output sequence length is set to 50.
processor:
"""
if w2v_kwargs is None:
w2v_kwargs = {}
self.w2v_path = w2v_path
self.w2v_kwargs = w2v_kwargs
self.w2v_model_loaded = False
super(WordEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
if not from_saved_model:
self._build_token2idx_from_w2v()
if self.sequence_length != 'auto':
self._build_model()
def _build_token2idx_from_w2v(self):
w2v = KeyedVectors.load_word2vec_format(self.w2v_path, **self.w2v_kwargs)
token2idx = {
self.processor.token_pad: 0,
self.processor.token_unk: 1,
self.processor.token_bos: 2,
self.processor.token_eos: 3
}
for token in w2v.index2word:
token2idx[token] = len(token2idx)
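        # Rows 0-3 of the matrix are reserved for the pad/unk/bos/eos tokens added above:
        # pad, bos and eos stay as zero vectors, unk gets a random vector, and the
        # pre-trained vectors fill the table from index 4 onwards.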
vector_matrix = np.zeros((len(token2idx), w2v.vector_size))
vector_matrix[1] = np.random.rand(w2v.vector_size)
vector_matrix[4:] = w2v.vectors
self.embedding_size = w2v.vector_size
self.w2v_vector_matrix = vector_matrix
self.w2v_token2idx = token2idx
self.w2v_top_words = w2v.index2entity[:50]
self.w2v_model_loaded = True
self.processor.token2idx = self.w2v_token2idx
self.processor.idx2token = dict([(value, key) for key, value in self.w2v_token2idx.items()])
logging.debug('------------------------------------------------')
logging.debug('Loaded gensim word2vec model')
logging.debug('model : {}'.format(self.w2v_path))
logging.debug('word count : {}'.format(len(self.w2v_vector_matrix)))
logging.debug('Top 50 word : {}'.format(self.w2v_top_words))
logging.debug('------------------------------------------------')
def _build_model(self, **kwargs):
if self.token_count == 0:
logging.debug('need to build after build_word2idx')
else:
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input')
layer_embedding = L.Embedding(self.token_count,
self.embedding_size,
weights=[self.w2v_vector_matrix],
trainable=False,
name=f'layer_embedding')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if not self.w2v_model_loaded:
self._build_token2idx_from_w2v()
super(WordEmbedding, self).analyze_corpus(x, y)
if __name__ == "__main__":
print('hello world')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/word_embedding.py | word_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bare_embedding.py
# time: 2019-05-20 10:36
import logging
from typing import Union, Optional
from tensorflow import keras
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
# Todo: A better name for this class
class BareEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
def __init__(self,
task: str = None,
sequence_length: Union[int, str] = 'auto',
embedding_size: int = 100,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Init bare embedding (embedding without pre-training)
Args:
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the sequence length is chosen
                to cover about 95% of the corpus sequences. With ``'variable'``, the model input shape is set to None
                so it can handle inputs of varying length, using the longest sequence in each batch as that batch's
                sequence length. With an integer, say ``50``, the input and output sequence length is set to 50.
embedding_size: Dimension of the dense embedding.
"""
super(BareEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=embedding_size,
processor=processor,
from_saved_model=from_saved_model)
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
if self.sequence_length == 0 or \
self.sequence_length == 'auto' or \
self.token_count == 0:
logging.debug('need to build after build_word2idx')
else:
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input')
layer_embedding = L.Embedding(self.token_count,
self.embedding_size,
name=f'layer_embedding')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
if __name__ == "__main__":
print('hello world')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/bare_embedding.py | bare_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: numeric_feature_embedding.py
# time: 2019-05-23 09:04
from typing import Union, Optional, Tuple, List
import numpy as np
from tensorflow import keras
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
import kashgari
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
# Todo: A better name for this class
class NumericFeaturesEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
def info(self):
info = super(NumericFeaturesEmbedding, self).info()
info['config'] = {
'feature_count': self.feature_count,
'feature_name': self.feature_name,
'sequence_length': self.sequence_length,
'embedding_size': self.embedding_size
}
return info
def __init__(self,
feature_count: int,
feature_name: str,
sequence_length: Union[str, int] = 'auto',
embedding_size: int = None,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
        Init numeric feature embedding (a categorical feature embedding trained together with the model)
        Args:
            feature_count: number of distinct values the feature can take
            feature_name: feature name, used to name the input and embedding layers
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the sequence length is chosen
                to cover about 95% of the corpus sequences. With ``'variable'``, the model input shape is set to None
                so it can handle inputs of varying length, using the longest sequence in each batch as that batch's
                sequence length. With an integer, say ``50``, the input and output sequence length is set to 50.
embedding_size: Dimension of the dense embedding.
"""
# Dummy Type
task = kashgari.CLASSIFICATION
if embedding_size is None:
embedding_size = feature_count * 8
super(NumericFeaturesEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=embedding_size,
processor=processor,
from_saved_model=from_saved_model)
self.feature_count = feature_count
self.feature_name = feature_name
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input_{self.feature_name}')
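        # The embedding table has `feature_count + 1` rows so feature values in
        # [0, feature_count] (including the padding value added by `pad_sequences`)
        # can all be looked up safely.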
layer_embedding = L.Embedding(self.feature_count + 1,
self.embedding_size,
name=f'layer_embedding_{self.feature_name}')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[str]], List[str]]):
pass
def process_x_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
if subset is not None:
numerized_samples = kashgari.utils.get_list_subset(data, subset)
else:
numerized_samples = data
return pad_sequences(numerized_samples, self.sequence_length, padding='post', truncating='post')
if __name__ == "__main__":
e = NumericFeaturesEmbedding(2, feature_name='is_bold', sequence_length=10)
e.embed_model.summary()
print(e.embed_one([1, 2]))
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/numeric_feature_embedding.py | numeric_feature_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py.py
# time: 2019-05-20 11:21
from kashgari.embeddings.bare_embedding import BareEmbedding
from kashgari.embeddings.bert_embedding import BERTEmbedding
from kashgari.embeddings.word_embedding import WordEmbedding
from kashgari.embeddings.numeric_feature_embedding import NumericFeaturesEmbedding
from kashgari.embeddings.stacked_embedding import StackedEmbedding
from kashgari.embeddings.gpt_2_embedding import GPT2Embedding
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/embeddings/__init__.py | __init__.py |
# encoding: utf-8
# author: AlexWang
# contact: ialexwwang@gmail.com
# file: kmax_pool_layer.py
# time: 2019-06-25 16:35
import kashgari
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
L = keras.layers
InputSpec = L.InputSpec
class KMaxPoolingLayer(L.Layer):
'''
    K-max pooling layer that extracts the k highest activations from a sequence (along the 2nd dimension).
TensorFlow backend.
# Arguments
        k: An int scalar,
            indicating how many top steps of features to pool.
sorted: A bool,
if output is sorted (default) or not.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
# Input shape
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
# Output shape
3D tensor with shape:
`(batch_size, top-k-steps, features)`
'''
def __init__(self, k=1, sorted=True, data_format='channels_last', **kwargs): # noqa: A002
super(KMaxPoolingLayer, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.k = k
self.sorted = sorted
if data_format.lower() in ['channels_first', 'channels_last']:
self.data_format = data_format.lower()
else:
self.data_format = K.image_data_format()
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
return (input_shape[0], self.k, input_shape[1])
else:
return (input_shape[0], self.k, input_shape[2])
def call(self, inputs):
if self.data_format == 'channels_last':
# swap last two dimensions since top_k will be applied along the last dimension
shifted_input = tf.transpose(inputs, [0, 2, 1])
# extract top_k, returns two tensors [values, indices]
top_k = tf.nn.top_k(shifted_input, k=self.k, sorted=self.sorted)[0]
else:
top_k = tf.nn.top_k(inputs, k=self.k, sorted=self.sorted)[0]
        # swap the dimensions back so the output shape is (batch_size, top_k, features)
return tf.transpose(top_k, [0, 2, 1])
def get_config(self):
config = {'k': self.k,
'sorted': self.sorted,
'data_format': self.data_format}
base_config = super(KMaxPoolingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
KMaxPooling = KMaxPoolingLayer
KMaxPoolLayer = KMaxPoolingLayer
kashgari.custom_objects['KMaxPoolingLayer'] = KMaxPoolingLayer
kashgari.custom_objects['KMaxPooling'] = KMaxPooling
kashgari.custom_objects['KMaxPoolLayer'] = KMaxPoolLayer
if __name__ == '__main__':
print('Hello world, KMaxPoolLayer/KMaxPoolingLayer.')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/layers/kmax_pool_layer.py | kmax_pool_layer.py |
# encoding: utf-8
# author: AlexWang
# contact: ialexwwang@gmail.com
# file: att_wgt_avg_layer.py
# time: 2019-06-24 19:35
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
import kashgari
L = keras.layers
initializers = keras.initializers
InputSpec = L.InputSpec
class AttentionWeightedAverageLayer(L.Layer):
'''
Computes a weighted average of the different channels across timesteps.
    Uses one parameter per channel to compute the attention value for a single timestep.
'''
def __init__(self, return_attention=False, **kwargs):
self.init = initializers.get('uniform')
self.supports_masking = True
self.return_attention = return_attention
super(AttentionWeightedAverageLayer, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(ndim=3)]
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[2].value, 1),
name='{}_w'.format(self.name),
initializer=self.init,
trainable=True
)
# self.trainable_weights = [self.W]
super(AttentionWeightedAverageLayer, self).build(input_shape)
def call(self, x, mask=None):
# computes a probability distribution over the timesteps
# uses 'max trick' for numerical stability
# reshape is done to avoid issue with Tensorflow
# and 1-dimensional weights
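        # Per timestep t: a_t = softmax_t(x_t . W); the layer returns sum_t a_t * x_t,
        # with masked timesteps excluded from the softmax normalisation.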
logits = K.dot(x, self.W)
x_shape = K.shape(x)
logits = K.reshape(logits, (x_shape[0], x_shape[1]))
ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
# masked timesteps have zero weight
if mask is not None:
mask = K.cast(mask, K.floatx())
ai = ai * mask
att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
weighted_input = x * K.expand_dims(att_weights)
result = K.sum(weighted_input, axis=1)
if self.return_attention:
return [result, att_weights]
return result
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
output_len = input_shape[2]
if self.return_attention:
return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
return (input_shape[0], output_len)
def compute_mask(self, inputs, input_mask=None):
if isinstance(input_mask, list):
return [None] * len(input_mask)
else:
return None
def get_config(self):
config = {'return_attention': self.return_attention, }
base_config = super(AttentionWeightedAverageLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
AttentionWeightedAverage = AttentionWeightedAverageLayer
AttWgtAvgLayer = AttentionWeightedAverageLayer
kashgari.custom_objects['AttentionWeightedAverageLayer'] = AttentionWeightedAverageLayer
kashgari.custom_objects['AttentionWeightedAverage'] = AttentionWeightedAverage
kashgari.custom_objects['AttWgtAvgLayer'] = AttWgtAvgLayer
if __name__ == '__main__':
print('Hello world, AttentionWeightedAverageLayer/AttWgtAvgLayer.')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/layers/att_wgt_avg_layer.py | att_wgt_avg_layer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: crf.py
# time: 2019-06-28 14:33
import tensorflow as tf
class CRF(tf.keras.layers.Layer):
"""
Conditional Random Field layer (tf.keras)
`CRF` can be used as the last layer in a network (as a classifier). Input shape (features)
must be equal to the number of classes the CRF can predict (a linear layer is recommended).
    Note: networks using `CRF` must be compiled with the loss and accuracy functions
    provided by this layer (`loss` and `viterbi_accuracy`), because sequence decoding
    relies on the layer's internal transition weights.
Args:
output_dim (int): the number of labels to tag each temporal input.
Input shape:
nD tensor with shape `(batch_size, sentence length, num_classes)`.
Output shape:
nD tensor with shape: `(batch_size, sentence length, num_classes)`.
"""
def __init__(self,
output_dim,
mode='reg',
supports_masking=False,
transitions=None,
**kwargs):
self.transitions = None
super(CRF, self).__init__(**kwargs)
self.output_dim = int(output_dim)
self.mode = mode
if self.mode == 'pad':
self.input_spec = [tf.keras.layers.InputSpec(min_ndim=3), tf.keras.layers.InputSpec(min_ndim=2)]
elif self.mode == 'reg':
self.input_spec = tf.keras.layers.InputSpec(min_ndim=3)
else:
            raise ValueError('`mode` must be either `reg` or `pad`')
self.supports_masking = supports_masking
self.sequence_lengths = None
def get_config(self):
config = {
'output_dim': self.output_dim,
'mode': self.mode,
'supports_masking': self.supports_masking,
'transitions': tf.keras.backend.eval(self.transitions)
}
base_config = super(CRF, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
if self.mode == 'pad':
assert len(input_shape) == 2
assert len(input_shape[0]) == 3
assert len(input_shape[1]) == 2
f_shape = tf.TensorShape(input_shape[0])
input_spec = [tf.keras.layers.InputSpec(min_ndim=3, axes={-1: f_shape[-1]}),
tf.keras.layers.InputSpec(min_ndim=2, axes={-1: 1}, dtype=tf.int32)]
else:
assert len(input_shape) == 3
f_shape = tf.TensorShape(input_shape)
input_spec = tf.keras.layers.InputSpec(min_ndim=3, axes={-1: f_shape[-1]})
if f_shape[-1] is None:
raise ValueError('The last dimension of the inputs to `CRF` should be defined. Found `None`.')
if f_shape[-1] != self.output_dim:
raise ValueError('The last dimension of the input shape must be equal to output shape. '
'Use a linear layer if needed.')
self.input_spec = input_spec
self.transitions = self.add_weight(name='transitions',
shape=[self.output_dim, self.output_dim],
initializer='glorot_uniform',
trainable=True)
self.built = True
def call(self, inputs, **kwargs):
if self.mode == 'pad':
sequences = tf.convert_to_tensor(inputs[0], dtype=self.dtype)
self.sequence_lengths = tf.keras.backend.flatten(inputs[-1])
else:
sequences = tf.convert_to_tensor(inputs, dtype=self.dtype)
shape = tf.shape(inputs)
self.sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
viterbi_sequence, _ = tf.contrib.crf.crf_decode(sequences, self.transitions,
self.sequence_lengths)
output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
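        # During training the raw potentials are returned so `self.loss` can compute the
        # CRF log-likelihood; at inference time the one-hot Viterbi decoding is returned.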
return tf.keras.backend.in_train_phase(sequences, output)
def loss(self, y_true, y_pred):
y_pred = tf.convert_to_tensor(y_pred, dtype=self.dtype)
log_likelihood, self.transitions = tf.contrib.crf.crf_log_likelihood(y_pred,
tf.cast(tf.keras.backend.argmax(y_true),
dtype=tf.int32),
self.sequence_lengths,
transition_params=self.transitions)
return tf.reduce_mean(-log_likelihood)
def compute_output_shape(self, input_shape):
if self.mode == 'pad':
data_shape = input_shape[0]
else:
data_shape = input_shape
tf.TensorShape(data_shape).assert_has_rank(3)
return data_shape[:2] + (self.output_dim,)
@property
def viterbi_accuracy(self):
def accuracy(y_true, y_pred):
shape = tf.shape(y_pred)
sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
viterbi_sequence, _ = tf.contrib.crf.crf_decode(y_pred, self.transitions, sequence_lengths)
output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
return tf.keras.metrics.categorical_accuracy(y_true, output)
accuracy.func_name = 'viterbi_accuracy'
return accuracy
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/layers/crf.py | crf.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: non_masking_layer.py
# time: 2019-05-23 14:05
import kashgari
from tensorflow.python.keras.layers import Layer
class NonMaskingLayer(Layer):
"""
    Fix for Conv1D layers, which cannot receive masked input; details: https://github.com/keras-team/keras/issues/4978
    Thanks to https://github.com/jacoxu
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(NonMaskingLayer, self).__init__(**kwargs)
def build(self, input_shape):
pass
def compute_mask(self, inputs, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
return x
kashgari.custom_objects['NonMaskingLayer'] = NonMaskingLayer
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/layers/non_masking_layer.py | non_masking_layer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-23 14:05
import tensorflow as tf
from tensorflow.python import keras
from kashgari.layers.non_masking_layer import NonMaskingLayer
from kashgari.layers.att_wgt_avg_layer import AttentionWeightedAverageLayer
from kashgari.layers.att_wgt_avg_layer import AttentionWeightedAverage, AttWgtAvgLayer
from kashgari.layers.kmax_pool_layer import KMaxPoolingLayer, KMaxPoolLayer, KMaxPooling
L = keras.layers
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/layers/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: scoring_processor.py
# time: 11:10 AM
from typing import List, Optional
import numpy as np
import kashgari
from kashgari import utils
from kashgari.processors.base_processor import BaseProcessor
def is_numeric(obj):
attrs = ['__add__', '__sub__', '__mul__', '__truediv__', '__pow__']
return all(hasattr(obj, attr) for attr in attrs)
class ScoringProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def __init__(self, output_dim=None, **kwargs):
super(ScoringProcessor, self).__init__(**kwargs)
self.output_dim = output_dim
def info(self):
info = super(ScoringProcessor, self).info()
info['task'] = kashgari.SCORING
return info
def _build_label_dict(self,
label_list: List[List[float]]):
"""
Build label2idx dict for sequence labeling task
Args:
label_list: corpus label list
"""
if self.output_dim is None:
label_sample = label_list[0]
if isinstance(label_sample, np.ndarray) and len(label_sample.shape) == 1:
self.output_dim = label_sample.shape[0]
elif is_numeric(label_sample):
self.output_dim = 1
elif isinstance(label_sample, list):
self.output_dim = len(label_sample)
else:
raise ValueError('Scoring Label Sample must be a float, float array or 1D numpy array')
# np_labels = np.array(label_list)
# if np_labels.max() > 1 or np_labels.min() < 0:
# raise ValueError('Scoring Label Sample must be in range[0,1]')
def process_y_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data[:]
y = np.array(target)
return y
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
return sequences
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
return sequences
if __name__ == "__main__":
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
x = x[:3]
y = [0.2, 0.3, 0.2]
p = ScoringProcessor()
p.analyze_corpus(x, y)
print(p.process_y_dataset(y))
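    # Additional sketch: vector-valued targets are accepted too; output_dim is then
    # inferred from the first label (here 2).
    p2 = ScoringProcessor()
    p2.analyze_corpus(x, [[0.1, 0.9], [0.4, 0.6], [0.8, 0.2]])
    print(p2.output_dim)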
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/processors/scoring_processor.py | scoring_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_processor.py
# time: 2019-05-21 11:27
import collections
import logging
import operator
from typing import List, Optional, Union, Dict, Any
import numpy as np
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from kashgari import utils
class BaseProcessor(object):
"""
Corpus Pre Processor class
"""
def __init__(self, **kwargs):
self.token2idx: Dict[str, int] = kwargs.get('token2idx', {})
self.idx2token: Dict[int, str] = dict([(v, k) for (k, v) in self.token2idx.items()])
self.token2count: Dict = {}
self.label2idx: Dict[str, int] = kwargs.get('label2idx', {})
self.idx2label: Dict[int, str] = dict([(v, k) for (k, v) in self.label2idx.items()])
self.token_pad: str = kwargs.get('token_pad', '<PAD>')
self.token_unk: str = kwargs.get('token_unk', '<UNK>')
self.token_bos: str = kwargs.get('token_bos', '<BOS>')
self.token_eos: str = kwargs.get('token_eos', '<EOS>')
self.dataset_info: Dict[str, Any] = kwargs.get('dataset_info', {})
self.add_bos_eos: bool = kwargs.get('add_bos_eos', False)
self.sequence_length = kwargs.get('sequence_length', None)
self.min_count = kwargs.get('min_count', 3)
def info(self):
return {
'class_name': self.__class__.__name__,
'config': {
'label2idx': self.label2idx,
'token2idx': self.token2idx,
'token_pad': self.token_pad,
'token_unk': self.token_unk,
'token_bos': self.token_bos,
'token_eos': self.token_eos,
'dataset_info': self.dataset_info,
'add_bos_eos': self.add_bos_eos,
'sequence_length': self.sequence_length
},
'module': self.__class__.__module__,
}
def analyze_corpus(self,
corpus: Union[List[List[str]]],
labels: Union[List[List[str]], List[str]],
force: bool = False):
rec_len = sorted([len(seq) for seq in corpus])[int(0.95 * len(corpus))]
self.dataset_info['RECOMMEND_LEN'] = rec_len
if len(self.token2idx) == 0 or force:
self._build_token_dict(corpus, self.min_count)
if len(self.label2idx) == 0 or force:
self._build_label_dict(labels)
def _build_token_dict(self, corpus: List[List[str]], min_count: int = 3):
"""
Build token index dictionary using corpus
Args:
corpus: List of tokenized sentences, like ``[['I', 'love', 'tf'], ...]``
min_count:
"""
token2idx = {
self.token_pad: 0,
self.token_unk: 1,
self.token_bos: 2,
self.token_eos: 3
}
token2count = {}
for sentence in corpus:
for token in sentence:
count = token2count.get(token, 0)
token2count[token] = count + 1
self.token2count = token2count
        # Sort tokens by frequency in descending order
sorted_token2count = sorted(token2count.items(),
key=operator.itemgetter(1),
reverse=True)
token2count = collections.OrderedDict(sorted_token2count)
for token, token_count in token2count.items():
if token not in token2idx and token_count >= min_count:
token2idx[token] = len(token2idx)
self.token2idx = token2idx
self.idx2token = dict([(value, key)
for key, value in self.token2idx.items()])
logging.debug(f"build token2idx dict finished, contains {len(self.token2idx)} tokens.")
self.dataset_info['token_count'] = len(self.token2idx)
def _build_label_dict(self, corpus: Union[List[List[str]], List[str]]):
raise NotImplementedError
def process_x_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if max_len is None:
max_len = self.sequence_length
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data
numerized_samples = self.numerize_token_sequences(target)
return pad_sequences(numerized_samples, max_len, padding='post', truncating='post')
def process_y_dataset(self,
data: Union[List[List[str]], List[str]],
max_len: Optional[int],
subset: Optional[List[int]] = None) -> np.ndarray:
raise NotImplementedError
def numerize_token_sequences(self,
sequences: List[List[str]]):
raise NotImplementedError
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
raise NotImplementedError
def reverse_numerize_label_sequences(self, sequence, **kwargs):
raise NotImplementedError
def __repr__(self):
return f"<{self.__class__}>"
def __str__(self):
return self.__repr__()
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/processors/base_processor.py | base_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# version: 1.0
# license: Apache Licence
# file: labeling_processor.py
# time: 2019-05-17 11:28
import collections
import logging
import operator
from typing import List, Dict, Optional
import numpy as np
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.utils import to_categorical
import kashgari
from kashgari import utils
from kashgari.processors.base_processor import BaseProcessor
class LabelingProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def info(self):
info = super(LabelingProcessor, self).info()
info['task'] = kashgari.LABELING
return info
def _build_label_dict(self,
label_list: List[List[str]]):
"""
Build label2idx dict for sequence labeling task
Args:
label_list: corpus label list
"""
        label2idx: Dict[str, int] = {
self.token_pad: 0
}
token2count = {}
for sequence in label_list:
for label in sequence:
count = token2count.get(label, 0)
token2count[label] = count + 1
sorted_token2count = sorted(token2count.items(),
key=operator.itemgetter(1),
reverse=True)
token2count = collections.OrderedDict(sorted_token2count)
for token in token2count.keys():
if token not in label2idx:
label2idx[token] = len(label2idx)
self.label2idx = label2idx
self.idx2label = dict([(value, key)
for key, value in self.label2idx.items()])
logging.debug(f"build label2idx dict finished, contains {len(self.label2idx)} labels.")
def process_y_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data[:]
numerized_samples = self.numerize_label_sequences(target)
padded_seq = pad_sequences(
numerized_samples, max_len, padding='post', truncating='post')
return to_categorical(padded_seq, len(self.label2idx))
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_pad] + seq + [self.token_pad]
result.append([self.label2idx[label] for label in seq])
return result
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
result = []
for index, seq in enumerate(sequences):
labels = []
if self.add_bos_eos:
seq = seq[1:]
for idx in seq:
labels.append(self.idx2label[idx])
if lengths is not None:
labels = labels[:lengths[index]]
result.append(labels)
return result
if __name__ == "__main__":
from kashgari.corpus import ChineseDailyNerCorpus
x, y = ChineseDailyNerCorpus.load_data()
p = LabelingProcessor()
p.analyze_corpus(x, y)
r = p.process_x_dataset(x, subset=[10, 12, 20])
print(r)
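    # Additional sketch: labels go through the same subset/pad pipeline and come back
    # as one-hot tensors of shape (samples, max_len, number_of_labels).
    y_r = p.process_y_dataset(y, max_len=20, subset=[10, 12, 20])
    print(y_r.shape)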
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/processors/labeling_processor.py | labeling_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py.py
# time: 2019-05-20 10:54
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.processors.labeling_processor import LabelingProcessor
from kashgari.processors.scoring_processor import ScoringProcessor
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/processors/__init__.py | __init__.py |
from typing import List, Optional
import numpy as np
from tensorflow.python.keras.utils import to_categorical
import kashgari
from kashgari import utils
from kashgari.processors.base_processor import BaseProcessor
from sklearn.preprocessing import MultiLabelBinarizer
class ClassificationProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def __init__(self, multi_label=False, **kwargs):
super(ClassificationProcessor, self).__init__(**kwargs)
self.multi_label = multi_label
if self.label2idx:
self.multi_label_binarizer: MultiLabelBinarizer = MultiLabelBinarizer(classes=list(self.label2idx.keys()))
self.multi_label_binarizer.fit([])
else:
self.multi_label_binarizer: MultiLabelBinarizer = None
def info(self):
info = super(ClassificationProcessor, self).info()
info['task'] = kashgari.CLASSIFICATION
info['config']['multi_label'] = self.multi_label
return info
def _build_label_dict(self,
labels: List[str]):
if self.multi_label:
label_set = set()
for i in labels:
label_set = label_set.union(list(i))
else:
label_set = set(labels)
self.label2idx = {}
for idx, label in enumerate(sorted(label_set)):
self.label2idx[label] = len(self.label2idx)
self.idx2label = dict([(value, key) for key, value in self.label2idx.items()])
self.dataset_info['label_count'] = len(self.label2idx)
self.multi_label_binarizer = MultiLabelBinarizer(classes=list(self.label2idx.keys()))
def process_y_dataset(self,
data: List[str],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data
if self.multi_label:
return self.multi_label_binarizer.fit_transform(target)
else:
numerized_samples = self.numerize_label_sequences(target)
return to_categorical(numerized_samples, len(self.label2idx))
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[str]) -> List[int]:
"""
Convert label sequence to label-index sequence
``['O', 'O', 'B-ORG'] -> [0, 0, 2]``
Args:
sequences: label sequence, list of str
Returns:
label-index sequence, list of int
"""
return [self.label2idx[label] for label in sequences]
def reverse_numerize_label_sequences(self, sequences, **kwargs):
if self.multi_label:
return self.multi_label_binarizer.inverse_transform(sequences)
else:
return [self.idx2label[label] for label in sequences]
if __name__ == "__main__":
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
p = ClassificationProcessor()
p.analyze_corpus(x, y)
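    # Additional sketch: single-label targets become one-hot rows; with multi_label=True
    # the processor instead expects a collection of labels per sample.
    print(p.label2idx)
    print(p.process_y_dataset(y[:3]).shape)
    mp = ClassificationProcessor(multi_label=True)
    mp.analyze_corpus(x, [[label] for label in y])
    print(mp.process_y_dataset([[label] for label in y[:3]]).shape)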
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/processors/classification_processor.py | classification_processor.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/bert_pre training/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/bert_pre training/chinese_L-12_H-768_A-12/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/bert_pre training/chinese_roberta_wwm_large_ext_L-24_H-1024_A-16/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/4-16:05
# @Author : 贾志凯 15716539228@163.com
# @File : keyword.py
# @Software: win10 python3.6 PyCharm
import math
import jieba
import jieba.posseg as psg
from gensim import corpora, models
from jieba import analyse
import functools
# Load the stop word list
def get_stopword_list():
    # Path of the stop word list: one word per line, loaded line by line
    # Normalize the encoding to keep matching accurate
    stop_word_path = r'D:\pysoftNLP_resources\extraction\stopword\stopword.txt'
stopword_list = [sw.replace('\n', '') for sw in open(stop_word_path,encoding='utf-8').readlines()]
return stopword_list
# Tokenization helper built on the jieba API
def seg_to_list(sentence, pos=False):
if not pos:
        # Tokenize without POS tagging
seg_list = jieba.cut(sentence)
else:
        # Tokenize with POS tagging
seg_list = psg.cut(sentence)
return seg_list
# Remove noise words
def word_filter(seg_list, pos=False):
stopword_list = get_stopword_list()
filter_list = []
    # The pos flag decides whether to filter by part of speech
    ## Without POS filtering, every word is tagged 'n' so that all words are kept
for seg in seg_list:
if not pos:
word = seg
flag = 'n'
else:
word = seg.word
flag = seg.flag
if not flag.startswith('n'):
continue
        # Drop stop words and words shorter than 2 characters
if not word in stopword_list and len(word) > 1:
filter_list.append(word)
return filter_list
# Load the dataset; pos toggles POS tagging, corpus_path is the dataset path
def load_data(pos=False, corpus_path=r'D:\pysoftNLP_resources\extraction\stopword\corpus.txt'):
    # Process the dataset with the helpers above, keeping only non-noise words in each document
doc_list = []
for line in open(corpus_path, 'r',encoding='utf-8'):
content = line.strip()
seg_list = seg_to_list(content, pos)
filter_list = word_filter(seg_list, pos)
doc_list.append(filter_list)
return doc_list
# Compute idf statistics from the corpus
def train_idf(doc_list):
idf_dic = {}
    # Total number of documents
tt_count = len(doc_list)
    # Number of documents each word appears in
for doc in doc_list:
for word in set(doc):
idf_dic[word] = idf_dic.get(word, 0.0) + 1.0
    # Convert counts to idf values; add 1 to the denominator for smoothing
for k, v in idf_dic.items():
idf_dic[k] = math.log(tt_count / (1.0 + v))
    # Words missing from the dict are assumed to occur in a single document, giving the default idf
default_idf = math.log(tt_count / (1.0))
return idf_dic, default_idf
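# Quick worked example (illustrative numbers): with 4 documents, a word appearing in
# only one of them gets idf = log(4 / (1 + 1)) ≈ 0.69, while a word appearing in all
# four gets log(4 / 5) ≈ -0.22, i.e. very common words are down-weighted.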
# Comparison function used to rank the top-K keywords by value
def cmp(e1, e2):
import numpy as np
res = np.sign(e1[1] - e2[1])
if res != 0:
return res
else:
a = e1[0] + e2[0]
b = e2[0] + e1[0]
if a > b:
return 1
elif a == b:
return 0
else:
return -1
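# Example (illustrative): sorted([('b', 0.5), ('a', 0.5), ('c', 0.9)],
#                                key=functools.cmp_to_key(cmp), reverse=True)
# gives [('c', 0.9), ('b', 0.5), ('a', 0.5)]: higher scores come first and ties are
# broken by the string comparison inside cmp.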
# TF-IDF class
class TfIdf(object):
    # Four arguments: trained idf dict, default idf value, processed word list to extract from, and number of keywords
def __init__(self, idf_dic, default_idf, word_list, keyword_num):
self.word_list = word_list
self.idf_dic, self.default_idf = idf_dic, default_idf
self.tf_dic = self.get_tf_dic()
self.keyword_num = keyword_num
    # Compute term frequencies
def get_tf_dic(self):
tf_dic = {}
for word in self.word_list:
tf_dic[word] = tf_dic.get(word, 0.0) + 1.0
tt_count = len(self.word_list)
for k, v in tf_dic.items():
tf_dic[k] = float(v) / tt_count
return tf_dic
    # Compute tf-idf scores
def get_tfidf(self):
tfidf_dic = {}
for word in self.word_list:
idf = self.idf_dic.get(word, self.default_idf)
tf = self.tf_dic.get(word, 0)
tfidf = tf * idf
tfidf_dic[word] = tfidf
        # Rank by tf-idf and keep the top keyword_num words as keywords
for k, v in sorted(tfidf_dic.items(), key=functools.cmp_to_key(cmp), reverse=True)[:self.keyword_num]:
print(k + ",", end='')
print()
# Topic model
class TopicModel(object):
    # Arguments: processed dataset, number of keywords, model type (LSI or LDA), and number of topics
def __init__(self, doc_list, keyword_num, model='LSI', num_topics=4):
        # Use gensim to turn the texts into vector representations
        # Build the vocabulary space first
self.dictionary = corpora.Dictionary(doc_list)
        # Vectorize with a bag-of-words model
corpus = [self.dictionary.doc2bow(doc) for doc in doc_list]
        # Weight every word by tf-idf to obtain weighted vector representations
self.tfidf_model = models.TfidfModel(corpus)
self.corpus_tfidf = self.tfidf_model[corpus]
self.keyword_num = keyword_num
self.num_topics = num_topics
        # Choose which model to train
if model == 'LSI':
self.model = self.train_lsi()
else:
self.model = self.train_lda()
        # Get the topic-word distribution of the dataset
word_dic = self.word_dictionary(doc_list)
self.wordtopic_dic = self.get_wordtopic(word_dic)
def train_lsi(self):
lsi = models.LsiModel(self.corpus_tfidf, id2word=self.dictionary, num_topics=self.num_topics)
return lsi
def train_lda(self):
lda = models.LdaModel(self.corpus_tfidf, id2word=self.dictionary, num_topics=self.num_topics)
return lda
def get_wordtopic(self, word_dic):
wordtopic_dic = {}
for word in word_dic:
single_list = [word]
wordcorpus = self.tfidf_model[self.dictionary.doc2bow(single_list)]
wordtopic = self.model[wordcorpus]
wordtopic_dic[word] = wordtopic
return wordtopic_dic
    # Compute the similarity between each word's topic distribution and the document's; keep the keyword_num most similar words as keywords
def get_simword(self, word_list):
sentcorpus = self.tfidf_model[self.dictionary.doc2bow(word_list)]
senttopic = self.model[sentcorpus]
        # Cosine similarity
def calsim(l1, l2):
a, b, c = 0.0, 0.0, 0.0
for t1, t2 in zip(l1, l2):
x1 = t1[1]
x2 = t2[1]
                a += x1 * x2
b += x1 * x1
c += x2 * x2
sim = a / math.sqrt(b * c) if not (b * c) == 0.0 else 0.0
return sim
        # Compute the topic-distribution similarity between the input text and every candidate word
sim_dic = {}
for k, v in self.wordtopic_dic.items():
if k not in word_list:
continue
sim = calsim(v, senttopic)
sim_dic[k] = sim
for k, v in sorted(sim_dic.items(), key=functools.cmp_to_key(cmp), reverse=True)[:self.keyword_num]:
print(k + ", ", end='')
print()
    # Vocabulary construction and vectorization helpers, a generic fallback when the gensim API is not used
def word_dictionary(self, doc_list):
dictionary = []
for doc in doc_list:
dictionary.extend(doc)
dictionary = list(set(dictionary))
return dictionary
def doc2bowvec(self, word_list):
vec_list = [1 if word in word_list else 0 for word in self.dictionary]
return vec_list
def tfidf_extract(word_list, pos=False, keyword_num=10):
doc_list = load_data(pos)
idf_dic, default_idf = train_idf(doc_list)
tfidf_model = TfIdf(idf_dic, default_idf, word_list, keyword_num)
tfidf_model.get_tfidf()
def textrank_extract(text, pos = False, keyword_num=10):
textrank = analyse.textrank
keywords = textrank(text, keyword_num)
    # Print the extracted keywords
for keyword in keywords:
print(keyword + ",", end='')
print()
def topic_extract(word_list, model, pos=False, keyword_num=10):
doc_list = load_data(pos)
topic_model = TopicModel(doc_list, keyword_num, model=model)
topic_model.get_simword(word_list)
if __name__ == '__main__':
text = '6月19日,《2012年度“中国爱心城市”公益活动新闻发布会》在京举行。' + \
'中华社会救助基金会理事长许嘉璐到会讲话。基金会高级顾问朱发忠,全国老龄' + \
'办副主任朱勇,民政部社会救助司助理巡视员周萍,中华社会救助基金会副理事长耿志远,' + \
'重庆市民政局巡视员谭明政。晋江市人大常委会主任陈健倩,以及10余个省、市、自治区民政局' + \
'领导及四十多家媒体参加了发布会。中华社会救助基金会秘书长时正新介绍本年度“中国爱心城' + \
'市”公益活动将以“爱心城市宣传、孤老关爱救助项目及第二届中国爱心城市大会”为主要内容,重庆市' + \
'、呼和浩特市、长沙市、太原市、蚌埠市、南昌市、汕头市、沧州市、晋江市及遵化市将会积极参加' + \
'这一公益活动。中国雅虎副总编张银生和凤凰网城市频道总监赵耀分别以各自媒体优势介绍了活动' + \
'的宣传方案。会上,中华社会救助基金会与“第二届中国爱心城市大会”承办方晋江市签约,许嘉璐理' + \
'事长接受晋江市参与“百万孤老关爱行动”向国家重点扶贫地区捐赠的价值400万元的款物。晋江市人大' + \
'常委会主任陈健倩介绍了大会的筹备情况。'
print(text)
pos = True
seg_list = seg_to_list(text, pos)
filter_list = word_filter(seg_list, pos)
    print('TF-IDF model results:')
    tfidf_extract(filter_list, pos=False, keyword_num=5)
    print('TextRank model results:')
    textrank_extract(text)
    print('LSI model results:')
    topic_extract(filter_list, 'LSI', pos)
    print('LDA model results:')
    topic_extract(filter_list, 'LDA', pos)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/extraction/keyword.py | keyword.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
__all__ = ['keyword'] | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/extraction/__init__.py | __init__.py |
# Easy data augmentation (EDA) techniques for text classification
# Jason Wei and Kai Zou
from pysoftNLP.enhancement.eda import *
# # Command-line arguments to accept
# import argparse
# ap = argparse.ArgumentParser()
# ap.add_argument("--input", required=True, type=str, help="输入需要增强的文件")
# ap.add_argument("--output", required=False, type=str, help="输出增强之后的文件")
# ap.add_argument("--num_aug", required=False, type=int, help="每个原始句子的增量之后的句子数")
# ap.add_argument("--alpha", required=False, type=float, help="每个句子中要更改的单词百分比")
# args = ap.parse_args()
#
# #the output file
# output = None
# # If no output location is given, create the output file next to the input
# if args.output:
# output = args.output
# else:
# from os.path import dirname, basename, join
# output = join(dirname(args.input), 'eda_' + basename(args.input))
#
# # Number of augmented sentences to generate per original sentence
# num_aug = 9 #default
# if args.num_aug:
# num_aug = args.num_aug
#
# #how much to change each sentence
# alpha = 0.1#default
# if args.alpha:
# alpha = args.alpha
# Generate more data with standard augmentation (train_orig: source file, output_file: destination, alpha: fraction of words to change, num_aug: sentences generated per original)
def gen_eda(train_orig, output_file, alpha, num_aug=9):
writer = open(output_file, 'w',encoding='utf-8')
lines = open(train_orig, 'r',encoding='utf-8').readlines()
    # Read the CSV line by line and split on ','; the text is in the second column, the label in the third
for i, line in enumerate(lines):
parts = line[:-1].split(',')
        label = parts[2]  # label
        sentence = parts[1]  # text
aug_sentences = eda(sentence, alpha_sr=alpha, alpha_ri=alpha, alpha_rs=alpha, p_rd=alpha, num_aug=num_aug)
for aug_sentence in aug_sentences:
writer.write(label + "," + aug_sentence + '\n')
writer.close()
print("generated augmented sentences with eda for " + train_orig + " to " + output_file + " with num_aug=" + str(num_aug))
# gen_eda(args.input, output, alpha=alpha, num_aug=num_aug)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/enhancement/augment.py | augment.py |