code | package | path | filename
---|---|---|---|
import requests
import json
#from bs4 import BeautifulSoup
import execjs # required: install it first with "pip install PyExecJS"; used to run the JS token script below
class Py4Js():
def __init__(self):
self.ctx = execjs.compile("""
function TL(a) {
var k = "";
var b = 406644;
var b1 = 3293161072;
var jd = ".";
var $b = "+-a^+6";
var Zb = "+-3^+b+-f";
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var m = a.charCodeAt(g);
128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
e[f++] = m >> 18 | 240,
e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
e[f++] = m >> 6 & 63 | 128),
e[f++] = m & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++) a += e[f],
a = RL(a, $b);
a = RL(a, Zb);
a ^= b1 || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return a.toString() + jd + (a ^ b)
};
function RL(a, b) {
var t = "a";
var Yb = "+";
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2),
d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
}
return a
}
""")
def getTk(self,text):
return self.ctx.call("TL",text)
# English -> Chinese
def buildUrl_e2c(text,tk):
baseUrl='https://translate.google.cn/translate_a/single'
baseUrl+='?client=webapp&'
baseUrl+='sl=en&'
baseUrl+='tl=zh-CN&'
baseUrl+='hl=en&'
baseUrl+='dt=at&'
baseUrl+='dt=bd&'
baseUrl+='dt=ex&'
baseUrl+='dt=ld&'
baseUrl+='dt=md&'
baseUrl+='dt=qca&'
baseUrl+='dt=rw&'
baseUrl+='dt=rm&'
baseUrl+='dt=ss&'
baseUrl+='dt=t&'
baseUrl+='ie=UTF-8&'
baseUrl+='oe=UTF-8&'
baseUrl+='otf=1&'
baseUrl+='pc=1&'
baseUrl+='ssel=0&'
baseUrl+='tsel=0&'
baseUrl+='kc=2&'
baseUrl+='tk='+str(tk)+'&'
baseUrl+='q='+text
return baseUrl
# Chinese -> English
def buildUrl_c2e(text,tk):
baseUrl='https://translate.google.cn/translate_a/single'
baseUrl+='?client=webapp&'
baseUrl+='sl=zh-CN&'
baseUrl+='tl=en&'
baseUrl+='hl=zh-CN&'
baseUrl+='dt=at&'
baseUrl+='dt=bd&'
baseUrl+='dt=ex&'
baseUrl+='dt=ld&'
baseUrl+='dt=md&'
baseUrl+='dt=qca&'
baseUrl+='dt=rw&'
baseUrl+='dt=rm&'
baseUrl+='dt=ss&'
baseUrl+='dt=t&'
baseUrl+='ie=UTF-8&'
baseUrl+='oe=UTF-8&'
baseUrl+='otf=1&'
baseUrl+='pc=1&'
baseUrl+='ssel=0&'
baseUrl+='tsel=0&'
baseUrl+='kc=2&'
baseUrl+='tk='+str(tk)+'&'
baseUrl+='q='+text
return baseUrl
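# translate() below chooses one of the two URL builders, issues the GET request and parses the
# JSON response: result[0][0][0] holds the translation, while result[7] (when non-empty) carries
# Google's "did you mean ..." spelling suggestion, which is re-translated instead.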
def translate(js, text, type):
header={
'authority':'translate.google.cn',
'method':'GET',
'path':'',
'scheme':'https',
'accept':'*/*',
'accept-encoding':'gzip, deflate, br',
'accept-language':'zh-CN,zh;q=0.9',
'cookie':'',
'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
'x-client-data':'CIa2yQEIpbbJAQjBtskBCPqcygEIqZ3KAQioo8oBGJGjygE='
}
if type == 'e2c' :
url = buildUrl_e2c(text,js.getTk(text))
elif type == 'c2e' :
url = buildUrl_c2e(text, js.getTk(text))
res=''
try:
r=requests.get(url)
result=json.loads(r.text)
if result[7]!=None and len(result[7])!=0:
# If the input text was misspelled, Google returns a "did you mean xxx" suggestion in result[7]; translate the corrected text and return that instead
try:
correctText=result[7][0].replace('<b><i>',' ').replace('</i></b>','')
print(correctText)
if type == 'e2c' :
correctUrl = buildUrl_e2c(correctText,js.getTk(correctText))
elif type == 'c2e' :
correctUrl = buildUrl_c2e(correctText, js.getTk(correctText))
correctR=requests.get(correctUrl)
newResult=json.loads(correctR.text)
res=newResult[0][0][0]
except Exception as e:
print(e)
#res=result[0][0][0]
res=''
else:
res=result[0][0][0]
except Exception as e:
res=''
print(url)
print("翻译"+text+"失败")
print("错误信息:")
print(e)
finally:
return res
if __name__ == '__main__':
js=Py4Js()
res=translate(js, '你好啊', 'c2e')
print("中文转英文:", res)
res=translate(js, 'hello', 'e2c')
print("英文转中文:", res)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/enhancement/translate.py | translate.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/3-13:31
# @Author : 贾志凯 15716539228@163.com
# @File : run.py
# @Software: win10 python3.6 PyCharm | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/enhancement/run.py | run.py |
import pandas as pd
import sys
#import xlrd
#import xlwt
#import sys
#from datetime import date,datetime
def xlsx_to_csv_pd(file_src, file_dest):
data_xls = pd.read_excel(file_src, index_col=0)
data_xls.to_csv(file_dest, encoding='utf-8')
'''
def csv_to_xlsx_pd():
csv = pd.read_csv('1.csv', encoding='utf-8')
csv.to_excel('1.xlsx', sheet_name='data')
def read_excel(filename):
workbook = xlrd.open_workbook(filename)
sheet2 = workbook.sheet_by_index(0)
for row in xrange(0, sheet2.nrows):
rows = sheet2.row_values(row)
def _tostr(cell):
if type(u'') == type(cell):
return "\"%s\"" % cell.encode('utf8')
else:
return "\"%s\"" % str(cell)
print (','.join([_tostr(cell) for cell in rows ]))
'''
if len(sys.argv) != 2:
print("只能输入1个参数\n请输入需要转换的文件名,如python transform.py a.xlsx")
else :
strtmp = sys.argv[1]
outfile = strtmp[:strtmp.find('.')]
outfile = outfile + '.csv'
xlsx_to_csv_pd(strtmp, outfile)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/enhancement/transform.py | transform.py |
# Easy data augmentation (EDA) techniques for text classification
# Jason Wei and Kai Zou
import os
import random
from random import shuffle
from pyhanlp import *
from pysoftNLP.enhancement.ciLin import CilinSimilarity # word-similarity helper based on the extended HIT Tongyici Cilin (synonym forest), using an information-content algorithm
random.seed(1)
#stop words list
stop_words = set()
def stopword_init():
basis = 'D:\pysoftNLP_resources\enhancement'
stopwords_file = '哈工大停用词表.txt'
model_path = os.path.join(basis, stopwords_file)
with open(model_path, 'r',encoding='utf-8') as f:
for line in f.readlines():
stop_words.add(line.strip())
stopwords_file = '百度停用词表.txt'
model_path = os.path.join(basis, stopwords_file)
with open(model_path, 'r',encoding='utf-8') as f:
for line in f.readlines():
stop_words.add(line.strip())
#stopwords_file = '中文停用词表.txt'
#with open(stopwords_file, 'r',encoding='utf-8') as f:
# for line in f.readlines():
# stop_words.add(line.strip())
print("已经初始化停用词表词个数: ", len(stop_words))
stopword_init()
synonym_handler = CilinSimilarity()
# word segmentation with pyhanlp
def get_segment(line):
HanLP.Config.ShowTermNature = False
StandardTokenizer = JClass("com.hankcs.hanlp.tokenizer.StandardTokenizer")
segment_list = StandardTokenizer.segment(line)
terms_list = []
for terms in segment_list :
terms_list.append(str(terms))
#print(terms_list)
return terms_list
########################################################################
# Synonym replacement
# Replace n words in the sentence with synonyms (drawn here from the Cilin synonym forest rather than WordNet)
########################################################################
def synonym_replacement(words, n):
new_words = words.copy()
random_word_list = list(set([word for word in words if word not in stop_words])) # candidate words that are not stop words
random.shuffle(random_word_list)
num_replaced = 0
for random_word in random_word_list:
synonyms = get_synonyms(random_word)
if len(synonyms) >= 1:
synonym = random.choice(list(synonyms)) # randomly pick one synonym
new_words = [synonym if word == random_word else word for word in new_words]
print("replaced", random_word, "with", synonym)
num_replaced += 1
if num_replaced >= n: #only replace up to n words
break
#this is stupid but we need it, trust me
sentence = ' '.join(new_words)
#print(sentence,'111111111111111111111111111')
new_words = sentence.split(' ')
#print('222222222222222222222222222',new_words)
return new_words
def get_synonyms(word):
synonyms = set()
if word not in synonym_handler.vocab:
print(word, 'is not in the Cilin synonym forest!')
else:
codes = synonym_handler.word_code[word]
for code in codes:
key = synonym_handler.code_word[code]
synonyms.update(key)
if word in synonyms:
synonyms.remove(word)
return list(synonyms)
########################################################################
# Random deletion
# Randomly delete each word in the sentence with probability p
########################################################################
def random_deletion(words, p):
# obviously, if there's only one word, don't delete it
if len(words) == 1:
return words
#randomly delete words with probability p
new_words = []
for word in words:
r = random.uniform(0, 1)
if r > p:
new_words.append(word)
#if you end up deleting all words, just return a random word
if len(new_words) == 0:
rand_int = random.randint(0, len(words)-1)
return [words[rand_int]]
return new_words
########################################################################
# Random swap
# Randomly swap two words in the sentence n times
########################################################################
def random_swap(words, n):
new_words = words.copy()
for _ in range(n):
new_words = swap_word(new_words)
return new_words
def swap_word(new_words):
random_idx_1 = random.randint(0, len(new_words)-1)
random_idx_2 = random_idx_1
counter = 0
while random_idx_2 == random_idx_1:
random_idx_2 = random.randint(0, len(new_words)-1)
counter += 1
if counter > 3:
return new_words
new_words[random_idx_1], new_words[random_idx_2] = new_words[random_idx_2], new_words[random_idx_1]
return new_words
########################################################################
# Random insertion
# Randomly insert n words into the sentence
########################################################################
def random_insertion(words, n):
new_words = words.copy()
for _ in range(n):
add_word(new_words)
return new_words
def add_word(new_words):
synonyms = []
counter = 0
while len(synonyms) < 1:
random_word = new_words[random.randint(0, len(new_words)-1)]
synonyms = get_synonyms(random_word)
counter += 1
if counter >= 10:
return
random_synonym = synonyms[0]
random_idx = random.randint(0, len(new_words)-1)
new_words.insert(random_idx, random_synonym)
########################################################################
# main data augmentation function
########################################################################
def eda(sentence, alpha_sr=0.1, alpha_ri=0.1, alpha_rs=0.1, p_rd=0.1, num_aug=9):
words = get_segment(sentence) # segment the sentence into words
num_words = len(words)
augmented_sentences = []
num_new_per_technique = int(num_aug/4)+1 # four augmentation techniques are used, so divide by 4
n_sr = max(1, int(alpha_sr*num_words)) # number of words each technique will replace / insert / swap
n_ri = max(1, int(alpha_ri*num_words))
n_rs = max(1, int(alpha_rs*num_words))
# sr (synonym replacement)
for _ in range(num_new_per_technique):
a_words = synonym_replacement(words, n_sr) #
augmented_sentences.append(''.join(a_words))
# ri (random insertion)
for _ in range(num_new_per_technique):
a_words = random_insertion(words, n_ri)
augmented_sentences.append(''.join(a_words))
# rs (random swap)
for _ in range(num_new_per_technique):
a_words = random_swap(words, n_rs)
augmented_sentences.append(''.join(a_words))
# rd (random deletion)
for _ in range(num_new_per_technique):
a_words = random_deletion(words, p_rd)
augmented_sentences.append(''.join(a_words))
augmented_sentences = [get_segment(sentence) for sentence in augmented_sentences]
shuffle(augmented_sentences)
# trim so that we have the desired number of augmented sentences
if num_aug >= 1:
augmented_sentences = augmented_sentences[:num_aug]
else:
keep_prob = num_aug / len(augmented_sentences)
augmented_sentences = [s for s in augmented_sentences if random.uniform(0, 1) < keep_prob]
augmented_sentences = [''.join(sentence) for sentence in augmented_sentences]
#append the original sentence
augmented_sentences.append(sentence)
return list(set(augmented_sentences))
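# Minimal usage sketch (the sample sentence is illustrative only; assumes the Cilin and
# stop-word files under D:\pysoftNLP_resources\enhancement are already downloaded):
if __name__ == '__main__':
    for augmented in eda('自然语言处理是人工智能的一个重要方向', num_aug=4):
        print(augmented)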
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/enhancement/eda.py | eda.py |
# -*- coding: utf-8 -*-
'''
@author: Yalei Meng
@contact: yaleimeng@sina.com
@license: (C) Copyright 2017, HUST Corporation Limited.
@DateTime: Created on 2018/5/3,at 19:13
@desc: Compute word similarity with an information-content based algorithm. Reference:
【1】彭琦, 朱新华, 陈意山,等. 基于信息内容的词林词语相似度计算[J]. 计算机应用研究, 2018(2):400-404.
'''
#import math
import os
class CilinSimilarity(object):
""" 基于哈工大同义词词林扩展版计算语义相似度 """
def __init__(self):
"""
'code_word'  dict keyed by sense code, value is the list of words sharing that code (one code covers several words)
'word_code'  dict keyed by word, value is the list of its sense codes (one word may have several codes)
'vocab'      the set of all distinct words, used to count the vocabulary size.
'mydict'     the number of hyponym nodes under each top / middle / low-level category code.
"""
self.code_word = {}
self.word_code = {}
self.vocab = set() # set of all words in the lexicon
file_in = 'D:\pysoftNLP_resources\enhancement'
cilin = os.path.join(file_in,'New_cilin.txt')
self.file = cilin
# self.mydict = {}
self.read_cilin()
def read_cilin(self):
"""
Read the Cilin synonym forest: sense code -> word group is stored in self.code_word,
word -> list of its sense codes is stored in self.word_code,
and all words are collected in self.vocab.
"""
head = set()
with open(self.file, 'r', encoding='gbk') as f:
for line in f.readlines():
res = line.split()
code = res[0] # sense code
words = res[1:] # the words of this synonym group
self.vocab.update(words) # add the group to the vocabulary
self.code_word[code] = words # key: sense code, value: word group
for w in words:
if w in self.word_code.keys(): # goal: key is the word itself, value is its list of sense codes
self.word_code[w].append(code) # the word is already present, so append the current code
else:
self.word_code[w] = [code] # otherwise create a new entry
# first pass: collect the codes of the top / middle / low-level categories
if len(code) < 6:
continue
fathers = [code[:1], code[:2], code[:4], code[:5], code[:7]]
head.update(fathers)
fatherlist = sorted(list(head))
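# Everything from the string literal below down to its closing quotes (the second pass that fills
# self.mydict plus the information-content methods get_common_str / Info_Content / sim_by_IC / sim2018)
# is effectively disabled; eda.py only relies on vocab, word_code and code_word built above.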
'''
with open(self.file, 'r', encoding='gbk') as f:
# 第二次遍历:得到大中小类的数量。更新到字典mydict里面。
for ele in fatherlist:
self.mydict[ele] = 0
for line in f.readlines():
res = line.split()
code = res[0] # 词义编码
words = res[1:] # 同组的多个词
if len(code) > 5 and code[:5] in self.mydict.keys():
self.mydict[code[:7]] += len(words)
self.mydict[code[:5]] += len(words)
if len(code) > 4 and code[:4] in self.mydict.keys():
self.mydict[code[:4]] += len(words)
if len(code) > 2 and code[:2] in self.mydict.keys():
self.mydict[code[:2]] += len(words)
if len(code) > 1 and code[:1] in self.mydict.keys():
self.mydict[code[:1]] += len(words)
def get_common_str(self, c1, c2):
""" 获取两个字符的公共部分,注意有些层是2位数字 """
res = ''
for i, j in zip(c1, c2):
if i == j:
res += i
else:
break
if 3 == len(res) or 6 == len(res):
res = res[:-1]
return res
def Info_Content(self, concept):
if concept == '':
return 0
total =0
for ele in self.mydict.keys():
if len(ele)==1:
total += self.mydict[ele]
FenMu = math.log(total,2)
#print('总结点数',total,FenMu)
hypo = 1
if concept in self.mydict.keys():
hypo += self.mydict[concept]
info = math.log(hypo, 2) / FenMu
# print(concept, '下位节点数:', hypo,'信息内容:',1-info)
return 1 - info
def sim_by_IC(self, c1, c2):
# find the common code prefix
LCS = self.get_common_str(c1, c2)
distance = self.Info_Content(LCS) - (self.Info_Content(c1) + self.Info_Content(c2)) / 2
return distance + 1
def sim2018(self, w1, w2):
""" 按照论文彭琦, 朱新华, 陈意山,等. 基于信息内容的词林词语相似度计算[J]. 计算机应用研究, 2018(2):400-404.计算相似度 """
for word in [w1, w2]:
if word not in self.vocab:
print(word, 'is not in the Cilin synonym forest!')
return 0 # if either word is missing from Cilin, the similarity is 0
# get the code lists of the two words
code1 = self.word_code[w1]
code2 = self.word_code[w2]
simlist = []
for c1 in code1: # take the maximum similarity over all code pairs
for c2 in code2:
cur_sim = self.sim_by_IC(c1, c2)
simlist.append(cur_sim)
aver = sum(simlist) / len(simlist)
# print(sorted(simlist,reverse=True))
if len(simlist) < 2:
return simlist[0]
if max(simlist) > 0.7:
return max(simlist)
elif aver > 0.2:
return (sum(simlist) - max(simlist)) / (len(simlist) - 1)
else:
return min(simlist)
'''
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/enhancement/ciLin.py | ciLin.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
__all__ = ['augment','ciLin','eda','transform','translate']
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/enhancement/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/clean/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/19-16:53
# @Author : 贾志凯 15716539228@163.com
# @File : logging.py
# @Software: win10 python3.6 PyCharm
import sys
class logger(object):
def __init__(self,filename):
self.terminal = sys.stdout
self.log = open(filename,"a")
def write(self,message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
# sys.stdout = logger('logging.txt') | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/utils/logging.py | logging.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/19-10:48
# @Author : 贾志凯 15716539228@163.com
# @File : down_resources.py
# @Software: win10 python3.6 PyCharm
from urllib import request, error
import sys
import zipfile
import tarfile
import socket
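# a global 15-second socket timeout keeps urlretrieve from hanging indefinitely on a stalled connection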
socket.setdefaulttimeout(15)
def progressbar(cur):
percent = '{:.2%}'.format(cur)
sys.stdout.write('\r')
sys.stdout.write('[%-100s] %s' % ('=' * int(cur*100), percent))
sys.stdout.flush()
# print(cur)
def schedule(blocknum,blocksize,totalsize):
'''
blocknum: number of blocks downloaded so far
blocksize: size of each transferred block
totalsize: total size of the remote file
'''
percent = 0
if totalsize == 0:
percent = 0
elif totalsize == -1 and blocknum==0:
# the original retry called download() with no arguments, which would raise a TypeError; just report the problem
print('The server did not report a file size; the download may have failed.')
elif totalsize == -1 and blocknum != 0: # already transferring and the size is still unknown; ignore
pass
else:
percent = blocknum * blocksize / totalsize
progressbar(percent)
if percent > 1.0:
percent = 1.0
progressbar(percent)
# print('\n'+'download : %.2f%%' %(percent))
def download(url,path):
try:
filename,headers = request.urlretrieve(url, path, schedule)
# print("headers",headers)
except error.HTTPError as e:
print(e)
print(url + ' download failed!' + '\r\n')
print('Please download it manually: %s' % url)
except error.URLError as e:
print(url + ' download failed!' + '\r\n')
print('Please download it manually: %s' % url)
print(e)
except Exception as e:
print(e)
print('Please download it manually: %s' % url)
else:
print('\r\n' + url + ' downloaded successfully!')
print('Downloaded file:', filename)
return filename
def unzip_file(zip_src):
r = zipfile.is_zipfile(zip_src)
dst_dir = str(zip_src).split('.')[0]
if r:
fz = zipfile.ZipFile(zip_src, 'r')
for file in fz.namelist():
fz.extract(file, dst_dir)
fz.close() # close the archive to release resources
else:
print('This is not zip')
def unzip(path):
zip_file = zipfile.ZipFile(path)
dst_dir = str(path).split('.')[0]
zip_list = zip_file.namelist() # list all files in the archive
for f in zip_list:
zip_file.extract(f,dst_dir) # extract each file into the target directory
zip_file.close() # close the archive to release resources
def untar(path = 'D:\pysoftNLP_resources\data.zip'):
tar = tarfile.open(path)
tar.extractall()
tar.close()
def download_decompress(url,path):
filename = download(url, path)
try:
if str(filename).split('.')[-1] == 'zip':
print('Extracting the zip archive, please wait...')
# unzip()
unzip_file(filename)
print('Extraction finished; the resources are ready to use.')
except Exception as e:
print(e)
print('Extraction failed; please extract the archive manually.')
try:
if str(filename).split('.')[-1] == 'gz':
print('Extracting the tar.gz archive, please wait...')
untar(filename) # pass the downloaded file instead of relying on untar()'s hard-coded default path
print('Extraction finished; the resources are ready to use.')
except Exception as e:
print(e)
print('Extraction failed; please extract the archive manually.')
# if __name__ == '__main__':
# print('开始下载:https://codeload.github.com/chengtingting980903/zzsnML/tar.gz/1.0.0')
# download_decompress()
# print('开始下载:https://github.com/xiaokai01/download_test/releases/download/0.0.1/863_classify_hy_1024_9.zip')
# download_decompress(url='https://github.com/chengtingting980903/zzsnML/releases/download/1.0.0/data.zip', path='data.zip')
# download_decompress(url= 'https://github.com/xiaokai01/download_test/releases/download/0.0.1/863_classify_hy_1024_9.zip', path= 'D:\pysoftNLP_resources\data.zip')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/utils/down_resources.py | down_resources.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/19-16:45
# @Author : 贾志凯 15716539228@163.com
# @File : down.py
# @Software: win10 python3.6 PyCharm
from pathlib import Path
import os
from pysoftNLP.utils import down_resources
import zipfile
def down(url,my_file, zip_file):
if not os.path.exists(my_file):
if not zipfile.is_zipfile(zip_file):
print('Starting download:', url)
down_resources.download_decompress(url, zip_file)
else:
print("Archive already downloaded! Extracting...")
down_resources.unzip_file(zip_file)
else:
print("Resources already present; skipping download.")
# download the pre-trained resource files
def download_resource():
path = 'D:\pysoftNLP_resources'
if not os.path.exists(path):
os.mkdir(path)
# pre-trained language-model resources
pre_training_file_file = Path("D:\pysoftNLP_resources\pre_training_file")
pre_training_file_zip_file = "D:\pysoftNLP_resources\pre_training_file.zip"
pre_training_file_url = 'https://github.com/xiaokai01/pysoftNLP/releases/download/0.0.4/pre_training_file.zip'
down(pre_training_file_url,pre_training_file_file,pre_training_file_zip_file)
# classification model resources
classification_file = Path("D:\pysoftNLP_resources\classification")
classification_zip_file = "D:\pysoftNLP_resources\classification.zip"
classification_url = 'https://github.com/xiaokai01/pysoftNLP/releases/download/0.0.4/classification.zip'
down(classification_url, classification_file, classification_zip_file)
# data augmentation resources
enhancement_file = Path("D:\pysoftNLP_resources\enhancement")
enhancement_zip_file = "D:\pysoftNLP_resources\enhancement.zip"
enhancement_url = 'https://github.com/xiaokai01/pysoftNLP/releases/download/0.0.4/enhancement.zip'
down(enhancement_url, enhancement_file, enhancement_zip_file)
# keyword extraction resources
extraction_file = Path("D:\pysoftNLP_resources\extraction")
extraction_zip_file = "D:\pysoftNLP_resources\extraction.zip"
extraction_url = 'https://github.com/xiaokai01/pysoftNLP/releases/download/0.0.4/extraction.zip'
down(extraction_url, extraction_file, extraction_zip_file)
# named entity recognition resources
entity_recognition_file = Path("D:\pysoftNLP_resources\entity_recognition")
entity_recognition_zip_file = "D:\pysoftNLP_resources\entity_recognition.zip"
entity_recognition_url = 'https://github.com/xiaokai01/pysoftNLP/releases/download/0.0.4/entity_recognition.zip'
down(entity_recognition_url, entity_recognition_file, entity_recognition_zip_file)
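# Minimal usage sketch (downloads and unpacks every resource bundle into D:\pysoftNLP_resources):
# from pysoftNLP.utils.down import download_resource
# download_resource()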
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/utils/down.py | down.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/19-10:47
# @Author : 贾志凯 15716539228@163.com
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
__all__ = ['down_resources','down'] | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/utils/__init__.py | __init__.py |
from keras_bert import extract_embeddings
model_path = 'chinese_L-12_H-768_A-12'
texts = ['今晚(2月11日),钟南山院士接受总台央视记者独家专访,通过央视回应了近日媒体报道“钟南山的最新论文发现新冠肺炎潜伏期最长可达24天”的问题。',
'英国苏格兰政府首席大臣、苏格兰民族党党魁妮古拉·斯特金11日在伦敦说,苏格兰人应有权重新选择是否独立。',
'教育部门明确提出“延期开学”是假期的延续,各校均不得以任何形式集体组织上新课,也不得举行任何形式的线下教学活动和集体活动。'
]
embeddings = extract_embeddings(model_path, texts)
print(type(embeddings))
print(embeddings)
for _ in embeddings:
print(_[0].shape) | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/extract_feature.py | extract_feature.py |
# -*- coding: utf-8 -*-
# @Time : 2020/6/23-15:03
# @Author : 贾志凯
# @File : pre.py
# @Software: win10 python3.6 PyCharm
import pandas as pd
import os
import numpy as np
from pysoftNLP.bert.extract_feature import BertVector
from keras.models import load_model
import time
def predict(model_name,text_list,label_map):
labels = []
model_dir = 'D:\pysoftNLP_resources\classification\models'
model_path = os.path.join(model_dir,model_name)
model = load_model(model_path)
bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=80)
for text in text_list:
# convert the sentence into a vector
t1=time.time()
vec = bert_model.encode([text])["encodes"][0]
t2=time.time()
print("encode time: %s" % (t2-t1))
x_train = np.array([vec])
# model prediction
predicted = model.predict(x_train)
t3 = time.time()
print("prediction time: %s" % (t3-t2))
y = np.argmax(predicted[0])
labels.append(label_map[y])
print(labels)
return labels
# pass
if __name__ == '__main__':
model_name = '863_classify_hy.h5'
label_map = {0:'it',1:'电力热力',2:'化工',3:'环保',4:'建筑',5:'交通 ',6:'教育文化',7:'矿业',8:'绿化',9:'能源',10: '农林' ,11:'市政',12:'水利' ,13:'通信',14:'医疗',15:'制造业'}
# load_model = load_model("D:\pysoftNLP_resources\classification\863_classify_hy.h5")
# # sentences to predict
texts = ['广西打好“电力牌”组合拳助力工业企业从复产到满产中国新闻网',
'分别是吕晓雪、唐禄俊、梁秋语、王翠翠、杨兴亮、吕桃桃、张耀夫、郭建波、中国医护服务网',
'富拉尔基区市场监管局开展《优化营商环境条例》宣传活动齐齐哈尔市人民政府',
'2020上海(国际)胶粘带与薄膜技术展览会制造交易网'
]
predict(model_name,texts,label_map)
# labels = []
# bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=80)
# # 对上述句子进行预测
# for text in texts:
# # 将句子转换成向量
# t1=time.time()
# vec = bert_model.encode([text])["encodes"][0]
# t2=time.time()
# print("encode时间:%s"%(t2-t1))
# x_train = np.array([vec])
# # 模型预测
# predicted = load_model.predict(x_train)
# t3 = time.time()
# print("预测时间:%s"%(t3-t2))
# y = np.argmax(predicted[0])
# # print(predicted)
# print(y) | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/pre.py | pre.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/4-14:32
# @Author : 贾志凯 15716539228@163.com
# @File : bert_dnn.py
# @Software: win10 python3.6 PyCharm
import pandas as pd
import numpy as np
# from pysoftNLP.classification.load_data import train_df, test_df
from keras.utils import to_categorical
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import Input, BatchNormalization, Dense,Dropout,SeparableConv1D,Embedding,LSTM
from pysoftNLP.bert.extract_feature import BertVector
import time
import os
# read the train / test CSV files
def read_data(train_data,test_data):
file_in = 'D:\pysoftNLP_resources\classification\data'
train_data = os.path.join(file_in,train_data)
test_data = os.path.join(file_in,test_data)
train_df = pd.read_csv(train_data)
train_df.columns = ['id', 'label', 'text']
test_df = pd.read_csv(test_data)
test_df.columns = ['id', 'label', 'text']
return train_df, test_df
train_data = 'x_tr_863.csv'
test_data = 'x_te_863.csv'
train_df, test_df = read_data(train_data,test_data)
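# Training hyper-parameters: the text encoder, the max sentence length fed to BERT,
# the number of target classes, and the Keras batch size / epoch count used below.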
args = {'encode': 'bert', 'sentence_length': 50, 'num_classes': 9, 'batch_size': 128, 'epochs': 100}
def train(train_df,test_df,args):
out_path = 'D:\pysoftNLP_resources\classification\models'
print('encoding started!')
star_encod_time = time.time()
bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=args['sentence_length']) # BERT sentence vectors
f = lambda text: bert_model.encode([text])["encodes"][0]
train_df['x'] = train_df['text'].apply(f)
test_df['x'] = test_df['text'].apply(f)
end_encod_time = time.time()
print("encoding time: %s" % (end_encod_time - star_encod_time))
x_train = np.array([vec for vec in train_df['x']])
x_test = np.array([vec for vec in test_df['x']])
y_train = np.array([vec for vec in train_df['label']])
y_test = np.array([vec for vec in test_df['label']])
print('x_train: ', x_train.shape)
y_train = to_categorical(y_train, args['num_classes'])
y_test = to_categorical(y_test, args['num_classes'])
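# Classifier head: a feed-forward stack on top of the 768-dimensional BERT sentence vector,
# Dense blocks narrowing from 1024 to 16 units with BatchNorm and Dropout, ending in a softmax
# over the target classes.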
x_in = Input(shape=(768,))
x_out = Dense(1024, activation="relu")(x_in)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(512, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(256, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(128, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(64, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(32, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(16, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dense(args['num_classes'], activation="softmax")(x_out) # args is a plain dict, so index it rather than using attribute access
model = Model(inputs=x_in, outputs=x_out)
print(model.summary())
model.compile(loss='categorical_crossentropy', # categorical_crossentropy
optimizer=Adam(), # adam
metrics=['accuracy'])
# model training and evaluation
model.fit(x_train, y_train, batch_size=args['batch_size'], epochs=args['epochs'])
wenj = '863_classify_768' + '_' + str(args['sentence_length']) + '_' + str(args['num_classes']) + '_' + str(args['batch_size']) + '_' + str(args['epochs']) + '.h5'
out_path = os.path.join(out_path, wenj)
model.save(out_path)
t3 = time.time()
print("训练时间:%s" % (t3 - end_encod_time))
print(model.evaluate(x_test, y_test))
t4 = time.time()
print('evaluation time:', t4 - t3)
train(train_df,test_df,args) | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/bert_dnn.py | bert_dnn.py |
# -*- coding: utf-8 -*-
# author: Jclian91
# place: Pudong Shanghai
# time: 2020-02-12 12:57
import pandas as pd
#
# # 读取txt文件
# def read_txt_file(file_path):
# with open(file_path, 'r', encoding='utf-8') as f:
# content = [_.strip() for _ in f.readlines()]
#
# labels, texts = [], []
# for line in content:
# parts = line.split()
# label, text = parts[0], ''.join(parts[1:])
# labels.append(label)
# texts.append(text)
#
# return labels, texts
#
# file_path = 'data/train.txt'
# labels, texts = read_txt_file(file_path)
# train_df = pd.DataFrame({'label': labels, 'text': texts})
#
# file_path = 'data/test.txt'
# labels, texts = read_txt_file(file_path)
# test_df = pd.DataFrame({'label': labels, 'text': texts})
train_df = pd.read_csv('data/x_tr_863.csv')
train_df.columns = ['id','label','text']
train_df=train_df.drop(['id'],axis=1)
# train_df=train_df[:500]
test_df = pd.read_csv('data/x_te_863.csv')
test_df.columns = ['id','label','text']
test_df = test_df.drop(['id'],axis=1)
# test_df=test_df[:100]
print(train_df.head())
print(test_df.head())
train_df['text_len'] = train_df['text'].apply(lambda x: len(x))
print(train_df.describe())
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/load_data.py | load_data.py |
# -*- coding: utf-8 -*-
# @Time : 2020/6/11-17:54
# @Author : 贾志凯
# @File : model_train.py
# @Software: win10 python3.6 PyCharm
import os
import tensorflow as tf
# # if training on a GPU
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.2 # cap the fraction of GPU memory this process may use
# sess = tf.Session(config = config)
import pandas as pd
import numpy as np
from pysoftNLP.classification.load_data import train_df, test_df
from keras.utils import to_categorical
from keras.models import Model
from keras.optimizers import Adam,SGD
from keras.layers import Input, BatchNormalization, Dense,Dropout,SeparableConv1D,Embedding,LSTM
from pysoftNLP.bert.extract_feature import BertVector
# from keras.layers.recurrent import LSTM,GRU
from sklearn.naive_bayes import MultinomialNB
# read the train / test CSV files
def read_data(train_data, test_data):
train_df = pd.read_csv(train_data)
train_df.columns = ['id', 'label', 'text']
test_df = pd.read_csv(test_data)
test_df.columns = ['id', 'label', 'text']
return train_df,test_df
# train_data, test_data
# train_df,test_df = read_data(train_data, test_data)
import time
# read the data and encode it with BERT
t1 =time.time()
bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=80)
print('begin encoding')
f = lambda text: bert_model.encode([text])["encodes"][0]
train_df['x'] = train_df['text'].apply(f)
test_df['x'] = test_df['text'].apply(f)
print('end encoding')
t2 =time.time()
print("encoding时间:%s"%(t2-t1))
x_train = np.array([vec for vec in train_df['x']])
x_test = np.array([vec for vec in test_df['x']])
y_train = np.array([vec for vec in train_df['label']])
y_test = np.array([vec for vec in test_df['label']])
print('x_train: ', x_train.shape)
# Convert class vectors to binary class matrices.
num_classes = 9
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
print(type(x_train),type(y_train),)
# build the model
x_in = Input(shape=(1024, ))
x_out = Dense(1024, activation="relu")(x_in)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(512, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(256, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(128, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(64, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(32, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(16, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dense(num_classes, activation="softmax")(x_out)
model = Model(inputs=x_in, outputs=x_out)
print(model.summary())
model.compile(loss='categorical_crossentropy',#categorical_crossentropy
optimizer=Adam(), #adam
metrics=['accuracy'])
# model training and evaluation
model.fit(x_train, y_train, batch_size=128, epochs=500)
model.save('863_classify_hy_1024_9.h5')
t3 =time.time()
print("训练时间:%s"%(t3-t2))
print(model.evaluate(x_test, y_test))
t4 = time.time()
print(t4-t3)
# class logger(object):
# def __init__(self,filename):
# self.terminal = sys.stdout
# self.log = open(filename,"a")
# def write(self,message):
# self.terminal.write(message)
# self.log.write(message)
# def flush(self):
# pass
# sys.stdout = logger("a.log")
# sys.stderr =logger("A.log")
#
# clf = MultinomialNB()
# clf.fit(x_train,y_train)
# # y_pre = clf.predict(x_test)
# from sklearn.model_selection import cross_val_score
# cvs = cross_val_score(clf,x_test,y_test,scoring="accuracy",cv=10)
# print(cvs)
# print(cvs.mean()) | 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/model_train.py | model_train.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/3-15:23
# @Author : 贾志凯 15716539228@163.com
# @File : mian.py
# @Software: win10 python3.6 PyCharm
import pandas as pd
import numpy as np
from pysoftNLP.classification.load_data import train_df, test_df
from keras.utils import to_categorical
from keras.models import Model
from keras.optimizers import Adam,SGD
from keras.layers import Input, BatchNormalization, Dense,Dropout,SeparableConv1D,Embedding,LSTM
from pysoftNLP.bert.extract_feature import BertVector
import time
import argparse
import os
# read the train / test CSV files
def read_data(train_data, test_data):
train_df = pd.read_csv(train_data)
train_df.columns = ['id', 'label', 'text']
test_df = pd.read_csv(test_data)
test_df.columns = ['id', 'label', 'text']
return train_df, test_df
args = {'encode':'bert','sentence_length':50,'num_classes':9,'batch_size':128,'epochs':100}
out_path = 'C:/Users/Administrator/Desktop'
def train(args,out_path):
print('encoding started!')
star_encod_time = time.time()
bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=args['sentence_length']) # BERT sentence vectors; args is a dict, so index it
f = lambda text: bert_model.encode([text])["encodes"][0]
train_df['x'] = train_df['text'].apply(f)
test_df['x'] = test_df['text'].apply(f)
end_encod_time = time.time()
print("encoding time: %s" % (end_encod_time - star_encod_time))
x_train = np.array([vec for vec in train_df['x']])
x_test = np.array([vec for vec in test_df['x']])
y_train = np.array([vec for vec in train_df['label']])
y_test = np.array([vec for vec in test_df['label']])
print('x_train: ', x_train.shape)
y_train = to_categorical(y_train, args['num_classes'])
y_test = to_categorical(y_test, args['num_classes'])
x_in = Input(shape=(768,))
x_out = Dense(1024, activation="relu")(x_in)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(512, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(256, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(128, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(64, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(32, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dropout(0.2)(x_out)
x_out = Dense(16, activation="relu")(x_out)
x_out = BatchNormalization()(x_out)
x_out = Dense(args['num_classes'], activation="softmax")(x_out)
model = Model(inputs=x_in, outputs=x_out)
print(model.summary())
model.compile(loss='categorical_crossentropy', # categorical_crossentropy
optimizer=Adam(), # adam
metrics=['accuracy'])
# model training and evaluation
model.fit(x_train, y_train, batch_size=args['batch_size'], epochs=args['epochs'])
wenj = '863_classify_768' + '_' + str(args['sentence_length']) + '_' + str(args['num_classes']) + '_' + str(args['batch_size']) + '_' + str(args['epochs']) + '.h5'
out_path = os.path.join(out_path, wenj)
model.save(out_path)
t3 = time.time()
print("训练时间:%s" % (t3 - end_encod_time))
print(model.evaluate(x_test, y_test))
t4 = time.time()
print('evaluation time:', t4 - t3)
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('--status', choices=['train', 'test'], help='update algorithm', default='test')
# parser.add_argument('--encode', choices=['bert','robert','word2vec','fasttxt'], help='词向量模型', default='bert')
# parser.add_argument('--sentence_length',help='句子长度', default=50,type=int)
# parser.add_argument('--num_classes', help='类别', type=int)#是几分类
# parser.add_argument('--batch_size', help='类别', type=int,default=128) # 是几分类
# parser.add_argument('--epochs', help='类别', type=int, default=100) # 是几分类
# args = parser.parse_args()
# print(args.status)
# if args.status == 'train':
# if args.encode == 'bert':
# print('encoding开始!')
# star_encod_time = time.time()
# bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len = args.sentence_length) #bert词向量
# f = lambda text: bert_model.encode([text])["encodes"][0]
# train_df['x'] = train_df['text'].apply(f)
# test_df['x'] = test_df['text'].apply(f)
# end_encod_time = time.time()
# print("encoding时间:%s" % (end_encod_time - star_encod_time))
# x_train = np.array([vec for vec in train_df['x']])
# x_test = np.array([vec for vec in test_df['x']])
# y_train = np.array([vec for vec in train_df['label']])
# y_test = np.array([vec for vec in test_df['label']])
# print('x_train: ', x_train.shape)
#
# y_train = to_categorical(y_train, args.num_classes)
# y_test = to_categorical(y_test, args.num_classes)
#
# x_in = Input(shape=(768,))
# x_out = Dense(1024, activation="relu")(x_in)
# x_out = BatchNormalization()(x_out)
# x_out = Dropout(0.2)(x_out)
# x_out = Dense(512, activation="relu")(x_out)
# x_out = BatchNormalization()(x_out)
# x_out = Dropout(0.2)(x_out)
# x_out = Dense(256, activation="relu")(x_out)
# x_out = BatchNormalization()(x_out)
# x_out = Dropout(0.2)(x_out)
# x_out = Dense(128, activation="relu")(x_out)
# x_out = BatchNormalization()(x_out)
# x_out = Dropout(0.2)(x_out)
# x_out = Dense(64, activation="relu")(x_out)
# x_out = BatchNormalization()(x_out)
# x_out = Dropout(0.2)(x_out)
# x_out = Dense(32, activation="relu")(x_out)
# x_out = BatchNormalization()(x_out)
# x_out = Dropout(0.2)(x_out)
# x_out = Dense(16, activation="relu")(x_out)
# x_out = BatchNormalization()(x_out)
# x_out = Dense(args.num_classes, activation="softmax")(x_out)
# model = Model(inputs=x_in, outputs=x_out)
# print(model.summary())
#
# model.compile(loss='categorical_crossentropy', # categorical_crossentropy
# optimizer=Adam(), # adam
# metrics=['accuracy'])
#
# # 模型训练以及评估
# model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs)
# model.save('863_classify_hy_1024_9.h5')
#
# t3 = time.time()
# print("训练时间:%s" % (t3 - end_encod_time))
# print(model.evaluate(x_test, y_test))
# t4 = time.time()
# print('模型验证时长:',t4 - t3)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/mian.py | mian.py |
# -*- coding: utf-8 -*-
# @Time : 2020/8/12-22:05
# @Author : 贾志凯
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
__all__ = ['bert_dnn','pre']
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/__init__.py | __init__.py |
# -*- coding: utf-8 -*-
# author: Jclian91
# place: Pudong Shanghai
# time: 2020-02-12 17:33
import pandas as pd
import numpy as np
from pysoftNLP.bert.extract_feature import BertVector
from keras.models import load_model
load_model = load_model("visit_classify.h5")
# sentences to predict
texts = ['在访问限制中,用户可以选择禁用iPhone的功能,包括Siri、iTunes购买功能、安装/删除应用等,甚至还可以让iPhone变成一台功能手机。以下是访问限制具体可以实现的一些功能',
'IT之家4月23日消息 近日,谷歌在其官方论坛发布消息表示,他们为Android Auto添加了一项新功能:可以访问完整联系人列表。用户现在可以通过在Auto的电话拨号界面中打开左上角的菜单访问完整的联系人列表。值得注意的是,这一功能仅支持在车辆停止时使用。',
'要通过telnet 访问路由器,需要先通过console 口对路由器进行基本配置,例如:IP地址、密码等。',
'IT之家3月26日消息 近日反盗版的国际咨询公司MUSO发布了2017年的年度报告,其中的数据显示,去年盗版资源网站访问量达到了3000亿次,比前一年(2016年)提高了1.6%。美国是访问盗版站点次数最多的国家,共有279亿次访问;其后分别是俄罗斯、印度和巴西,中国位列第18。',
'应葡萄牙议会邀请,全国人大常委会副委员长吉炳轩率团于12月14日至16日访问葡萄牙,会见副议长费利佩、社会党副总书记卡内罗。',
'2月26日至3月2日,应香港特区政府“内地贵宾访港计划”邀请,省委常委、常务副省长陈向群赴港考察访问,重点围绕“香港所长、湖南所需”,与特区政府相关部门和机构深入交流,推动湖南与香港交流合作取得新进展。',
'目前A站已经恢复了访问,可以直接登录,网页加载正常,视频已经可以正常播放。',
'难民署特使安吉丽娜·朱莉6月8日结束了对哥伦比亚和委内瑞拉边境地区的难民营地为期两天的访问,她对哥伦比亚人民展现的人道主义和勇气表示赞扬。',
'据《南德意志报》报道,德国总理默克尔计划明年1月就前往安卡拉,和土耳其总统埃尔多安进行会谈。',
'自9月14日至18日,由越共中央政治局委员、中央书记处书记、中央经济部部长阮文平率领工作代表团对希腊进行工作访问。',
'Win7电脑提示无线适配器或访问点有问题怎么办?很多用户在使用无线网连接上网时,发现无线网显示已连接,但旁边却出现了一个黄色感叹号,无法进行网络操作,通过诊断提示电脑无线适配器或访问点有问题,且处于未修复状态,这该怎么办呢?下面小编就和大家分享下Win7电脑提示无线适配器或访问点有问题的解决方法。',
'2019年10月13日至14日,外交部副部长马朝旭访问智利,会见智利外长里韦拉,同智利总统外事顾问萨拉斯举行会谈,就智利举办亚太经合组织(APEC)第二十七次领导人非正式会议等深入交换意见。',
'未开发所有安全组之前访问,FTP可以链接上,但是打开会很慢,需要1-2分钟才能链接上',
'win7系统电脑的用户,在连接WIFI网络网上时,有时候会遇到突然上不了网,查看连接的WIFI出现“有限的访问权限”的文字提示。',
'联合国秘书长潘基文8日访问了日本福岛县,与当地灾民交流并访问了一所高中。',
'国务院总理温家宝当地时间23日下午乘专机抵达布宜诺斯艾利斯,开始对阿根廷进行正式访问。',
'正在中国访问的巴巴多斯总理斯图尔特15日在陕西西安参观访问。',
'据外媒报道,当地时间10日,美国白宫发声明称,美国总统特朗普将于2月底访问印度,与印度总理莫迪进行战略对话。',
'2月28日,唐山曹妃甸蓝色海洋科技有限公司董事长赵力军等一行5人到黄海水产研究所交流访问。黄海水产研究所副所长辛福言及相关部门负责人、专家等参加了会议。',
'2018年7月2日,莫斯科孔子文化促进会会长姜彦彬,常务副会长陈国建,在中国著名留俄油画大师牟克教授的陪同下,访问了莫斯科国立苏里科夫美术学院,受到第一副校长伊戈尔·戈尔巴秋克先生接待。'
]
labels = []
bert_model = BertVector(pooling_strategy="REDUCE_MEAN", max_seq_len=100)
# run prediction on the sentences above
for text in texts:
# convert the sentence into a vector
vec = bert_model.encode([text])["encodes"][0]
x_train = np.array([vec])
# model prediction
predicted = load_model.predict(x_train)
y = np.argmax(predicted[0])
label = 'Y' if y else 'N'
labels.append(label)
for text,label in zip(texts, labels):
print('%s\t%s'%(label, text))
df = pd.DataFrame({'句子':texts, "是否属于出访类事件": labels})
df.to_excel('./result.xlsx', index=False)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/classification/model_predict.py | model_predict.py |
from pysoftNLP.bert.graph import import_tf
from pysoftNLP.bert import modeling
from pysoftNLP.bert import tokenization
from pysoftNLP.bert.graph import optimize_graph
from pysoftNLP.bert import args
from queue import Queue
from threading import Thread
tf = import_tf(0, True)
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
class BertVector:
def __init__(self, batch_size=32, pooling_strategy="REDUCE_MEAN", max_seq_len=40):
"""
init BertVector
:param batch_size: Depending on your memory default is 32
"""
self.max_seq_length = max_seq_len
self.layer_indexes = args.layer_indexes
self.gpu_memory_fraction = 1
if pooling_strategy == "NONE":
pooling_strategy = args.PoolingStrategy.NONE
elif pooling_strategy == "REDUCE_MAX":
pooling_strategy = args.PoolingStrategy.REDUCE_MAX
elif pooling_strategy == "REDUCE_MEAN":
pooling_strategy = args.PoolingStrategy.REDUCE_MEAN
elif pooling_strategy == "REDUCE_MEAN_MAX":
pooling_strategy = args.PoolingStrategy.REDUCE_MEAN_MAX
self.graph_path = optimize_graph(pooling_strategy=pooling_strategy, max_seq_len=self.max_seq_length)
self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
self.batch_size = batch_size
self.estimator = self.get_estimator()
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
self.predict_thread.start()
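# The daemon thread above keeps estimator.predict() running as a long-lived generator:
# encode() pushes a batch of sentences onto input_queue and blocks on output_queue until
# the corresponding vectors are produced, so the TensorFlow graph is loaded only once.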
def get_estimator(self):
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.model_fn import EstimatorSpec
def model_fn(features, labels, mode, params):
with tf.gfile.GFile(self.graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
input_names = ['input_ids', 'input_mask', 'input_type_ids']
output = tf.import_graph_def(graph_def,
input_map={k + ':0': features[k] for k in input_names},
return_elements=['final_encodes:0'])
return EstimatorSpec(mode=mode, predictions={
'encodes': output[0]
})
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
config.log_device_placement = False
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
params={'batch_size': self.batch_size})
def predict_from_queue(self):
prediction = self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False)
for i in prediction:
self.output_queue.put(i)
def encode(self, sentence):
self.input_queue.put(sentence)
prediction = self.output_queue.get()
return prediction
def queue_predict_input_fn(self):
return (tf.data.Dataset.from_generator(
self.generate_from_queue,
output_types={'unique_ids': tf.int32,
'input_ids': tf.int32,
'input_mask': tf.int32,
'input_type_ids': tf.int32},
output_shapes={
'unique_ids': (1,),
'input_ids': (None, self.max_seq_length),
'input_mask': (None, self.max_seq_length),
'input_type_ids': (None, self.max_seq_length)}))
def generate_from_queue(self):
while True:
features = list(self.convert_examples_to_features(seq_length=self.max_seq_length, tokenizer=self.tokenizer))
yield {
'unique_ids': [f.unique_id for f in features],
'input_ids': [f.input_ids for f in features],
'input_mask': [f.input_mask for f in features],
'input_type_ids': [f.input_type_ids for f in features]
}
def input_fn_builder(self, features, seq_length):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_input_type_ids = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_input_type_ids.append(feature.input_type_ids)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_type_ids":
tf.constant(
all_input_type_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn
def model_fn_builder(self, bert_config, init_checkpoint, layer_indexes):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
input_type_ids = features["input_type_ids"]
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
with jit_scope():
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids)
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
init_checkpoint)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {
"unique_id": unique_ids,
}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
from tensorflow.python.estimator.model_fn import EstimatorSpec
output_spec = EstimatorSpec(mode=mode, predictions=predictions)
return output_spec
return model_fn
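# Note: input_fn_builder and model_fn_builder above are carried over from BERT's original
# extract_features example; the estimator actually used here is the one built in get_estimator()
# from the frozen graph produced by optimize_graph().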
def convert_examples_to_features(self, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
input_masks = []
examples = self._to_example(self.input_queue.get())
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
# if the sentences's length is more than seq_length, only use sentence's left part
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
# Where "input_ids" are tokens's index in vocabulary
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
input_masks.append(input_mask)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (example.unique_id))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
yield InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
@staticmethod
def _to_example(sentences):
import re
"""
sentences to InputExample
:param sentences: list of strings
:return: list of InputExample
"""
unique_id = 0
for ss in sentences:
line = tokenization.convert_to_unicode(ss)
if not line:
continue
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)
unique_id += 1
if __name__ == "__main__":
import time
bert = BertVector()
while True:
question = input('question: ')
start = time.time()
vectors = bert.encode([question])
print(str(vectors))
#print(f'predict time:----------{time.time() - start}')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/bert/extract_feature.py | extract_feature.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
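# A minimal, self-contained sketch of driving the AdamWeightDecayOptimizer defined above
# on a toy problem (assumes a TF 1.x runtime, like the rest of this file). The variable
# names, hyper-parameters, and the toy loss are illustrative only, not part of the library.
if __name__ == "__main__":
  toy_w = tf.get_variable("toy_w", shape=[], initializer=tf.zeros_initializer())
  toy_x = tf.constant([1.0, 2.0, 3.0])
  # Least-squares fit of y = 2x with a single scalar weight; the optimum is toy_w == 2.0.
  toy_loss = tf.reduce_mean(tf.square(2.0 * toy_x - toy_w * toy_x))
  toy_optimizer = AdamWeightDecayOptimizer(
      learning_rate=0.1,
      weight_decay_rate=0.01,
      exclude_from_weight_decay=["bias"])
  toy_grads = tf.gradients(toy_loss, [toy_w])
  toy_train_op = toy_optimizer.apply_gradients(zip(toy_grads, [toy_w]))
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
      sess.run(toy_train_op)
    # Adam-style normalized steps oscillate within roughly one learning rate of the
    # optimum, so this should print a value close to 2.0.
    print("learned weight:", sess.run(toy_w))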
# ==== end of file: pysoftNLP/bert/optimization.py (125softNLP-0.0.1 wheel) ====
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
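# A small, self-contained sketch of the tokenizers defined above, run on a tiny hand-built
# vocabulary (illustrative only, not a released BERT vocab). BasicTokenizer shows the
# CJK-splitting and punctuation-splitting behaviour; WordpieceTokenizer shows the greedy
# longest-match-first algorithm described in its docstring.
if __name__ == "__main__":
  basic = BasicTokenizer(do_lower_case=True)
  print(basic.tokenize("Hello, 世界!"))  # ['hello', ',', '世', '界', '!']
  toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3, "runn": 4, "##ing": 5}
  wordpiece = WordpieceTokenizer(vocab=toy_vocab)
  print(wordpiece.tokenize("unaffable"))  # ['un', '##aff', '##able']
  print(wordpiece.tokenize("running"))    # ['runn', '##ing']
  print(wordpiece.tokenize("xyz"))        # ['[UNK]'] -- no piece of "xyz" is in the vocab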
# ==== end of file: pysoftNLP/bert/tokenization.py (125softNLP-0.0.1 wheel) ====
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=True,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
      is_training: bool. True for training model, False for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings. On the TPU,
        it is much faster if this is True; on the CPU or GPU, it is faster if
this is False.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
return input_tensor * cdf
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
  # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better
for TPUs.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*V]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*V]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
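# A minimal smoke-test sketch (assumes a TF 1.x runtime): builds a deliberately tiny
# BertModel on hand-written ids and prints the pooled output shape. The hyper-parameters
# below are illustrative and far smaller than the released BERT-Base configuration.
if __name__ == "__main__":
  tiny_config = BertConfig(
      vocab_size=100,
      hidden_size=32,
      num_hidden_layers=2,
      num_attention_heads=2,
      intermediate_size=64,
      max_position_embeddings=16,
      type_vocab_size=2)
  tiny_ids = tf.constant([[7, 6, 0, 0], [1, 2, 3, 0]], dtype=tf.int32)
  tiny_mask = tf.constant([[1, 1, 0, 0], [1, 1, 1, 0]], dtype=tf.int32)
  tiny_model = BertModel(
      config=tiny_config,
      is_training=False,
      input_ids=tiny_ids,
      input_mask=tiny_mask,
      use_one_hot_embeddings=False)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    pooled = sess.run(tiny_model.get_pooled_output())
    print("pooled output shape:", pooled.shape)  # expected: (2, 32)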
# ==== end of file: pysoftNLP/bert/modeling.py (125softNLP-0.0.1 wheel) ====
import os
from enum import Enum
class PoolingStrategy(Enum):
NONE = 0
REDUCE_MAX = 1
REDUCE_MEAN = 2
REDUCE_MEAN_MAX = 3
FIRST_TOKEN = 4 # corresponds to [CLS] for single sequences
LAST_TOKEN = 5 # corresponds to [SEP] for single sequences
CLS_TOKEN = 4 # corresponds to the first token for single seq.
SEP_TOKEN = 5 # corresponds to the last token for single seq.
def __str__(self):
return self.name
@staticmethod
def from_string(s):
try:
return PoolingStrategy[s]
except KeyError:
raise ValueError()
# file_path = os.path.dirname(os.path.dirname(__file__))
# file_path = 'D:\pysoftNLP_resources'
# model_dir = os.path.join(file_path, 'chinese_L-12_H-768_A-12')
# print(file_path)
model_dir = r'D:\pysoftNLP_resources\pre_training_file\chinese_L-12_H-768_A-12'
print('Pre-trained BERT model dir:', model_dir)
config_name = os.path.join(model_dir, 'bert_config.json')
ckpt_name = os.path.join(model_dir, 'bert_model.ckpt')
vocab_file = os.path.join(model_dir, 'vocab.txt')
# the maximum length of a sequence. Sequences longer than max_seq_len will be truncated on the left side. Thus, if you
# want to send long sequences to the model, please make sure the program can handle them correctly.
#max_seq_len = 5
xla = True
# list of int. This model has 12 layers; by default this program works on the second-to-last layer, because the last
# layer is too close to the pre-training target functions. If you want to use the last hidden layer anyway, feel free
# to set layer_indexes=[-1].
layer_indexes = [-2]
#pooling_strategy = PoolingStrategy.NONE
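# A small usage sketch of the PoolingStrategy helper above. Because CLS_TOKEN/SEP_TOKEN
# reuse the values of FIRST_TOKEN/LAST_TOKEN, the Enum treats them as aliases.
if __name__ == "__main__":
    print(PoolingStrategy.from_string("REDUCE_MEAN"))                # REDUCE_MEAN
    print(PoolingStrategy.CLS_TOKEN is PoolingStrategy.FIRST_TOKEN)  # True (alias)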
# ==== end of file: pysoftNLP/bert/args.py (125softNLP-0.0.1 wheel) ====
# coding:utf-8
import os
import tempfile
import random
import json
import logging
from termcolor import colored
from pysoftNLP.bert import modeling
from pysoftNLP.bert import args
from pysoftNLP.bert.args import PoolingStrategy
import contextlib
def import_tf(device_id=-1, verbose=False):
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if device_id < 0 else str(device_id)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' if verbose else '3'
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.DEBUG if verbose else tf.logging.ERROR)
return tf
def set_logger(context, verbose=False):
logger = logging.getLogger(context)
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
formatter = logging.Formatter(
'%(levelname)-.1s:' + context + ':[%(filename).5s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt=
'%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG if verbose else logging.INFO)
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
return logger
def optimize_graph(logger=None, verbose=False, pooling_strategy=PoolingStrategy.REDUCE_MEAN, max_seq_len=40):
if not logger:
logger = set_logger(colored('BERT_VEC', 'yellow'), verbose)
try:
# we don't need GPU for optimizing the graph
tf = import_tf(device_id=0, verbose=verbose)
from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference
        # allow_soft_placement: let TensorFlow automatically pick a device that can run each op
config = tf.ConfigProto(allow_soft_placement=True)
config_fp = args.config_name
init_checkpoint = args.ckpt_name
logger.info('model config: %s' % config_fp)
        # load the BERT config file
with tf.gfile.GFile(config_fp, 'r') as f:
bert_config = modeling.BertConfig.from_dict(json.load(f))
logger.info('build graph...')
# input placeholders, not sure if they are friendly to XLA
input_ids = tf.placeholder(tf.int32, (None, max_seq_len), 'input_ids')
input_mask = tf.placeholder(tf.int32, (None, max_seq_len), 'input_mask')
input_type_ids = tf.placeholder(tf.int32, (None, max_seq_len), 'input_type_ids')
        # XLA acceleration
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope if args.xla else contextlib.suppress
with jit_scope():
input_tensors = [input_ids, input_mask, input_type_ids]
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids,
use_one_hot_embeddings=False)
            # get all trainable variables
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
minus_mask = lambda x, m: x - tf.expand_dims(1.0 - m, axis=-1) * 1e30
mul_mask = lambda x, m: x * tf.expand_dims(m, axis=-1)
masked_reduce_max = lambda x, m: tf.reduce_max(minus_mask(x, m), axis=1)
masked_reduce_mean = lambda x, m: tf.reduce_sum(mul_mask(x, m), axis=1) / (
tf.reduce_sum(m, axis=1, keepdims=True) + 1e-10)
            # pool the selected encoder layer(s) into a fixed-size sentence vector
with tf.variable_scope("pooling"):
                # if only one layer index is requested, take just that layer's output
if len(args.layer_indexes) == 1:
encoder_layer = model.all_encoder_layers[args.layer_indexes[0]]
else:
                    # otherwise iterate over the requested layers and concatenate their outputs, shape: 768 * num_layers
all_layers = [model.all_encoder_layers[l] for l in args.layer_indexes]
encoder_layer = tf.concat(all_layers, -1)
input_mask = tf.cast(input_mask, tf.float32)
            # the code below builds the sentence vector; it can be seen as a convolution-like operation whose kernel is input_mask, except the results are not summed
if pooling_strategy == PoolingStrategy.REDUCE_MEAN:
pooled = masked_reduce_mean(encoder_layer, input_mask)
elif pooling_strategy == PoolingStrategy.REDUCE_MAX:
pooled = masked_reduce_max(encoder_layer, input_mask)
elif pooling_strategy == PoolingStrategy.REDUCE_MEAN_MAX:
pooled = tf.concat([masked_reduce_mean(encoder_layer, input_mask),
masked_reduce_max(encoder_layer, input_mask)], axis=1)
elif pooling_strategy == PoolingStrategy.FIRST_TOKEN or \
pooling_strategy == PoolingStrategy.CLS_TOKEN:
pooled = tf.squeeze(encoder_layer[:, 0:1, :], axis=1)
elif pooling_strategy == PoolingStrategy.LAST_TOKEN or \
pooling_strategy == PoolingStrategy.SEP_TOKEN:
seq_len = tf.cast(tf.reduce_sum(input_mask, axis=1), tf.int32)
rng = tf.range(0, tf.shape(seq_len)[0])
indexes = tf.stack([rng, seq_len - 1], 1)
pooled = tf.gather_nd(encoder_layer, indexes)
elif pooling_strategy == PoolingStrategy.NONE:
pooled = mul_mask(encoder_layer, input_mask)
else:
raise NotImplementedError()
pooled = tf.identity(pooled, 'final_encodes')
output_tensors = [pooled]
tmp_g = tf.get_default_graph().as_graph_def()
with tf.Session(config=config) as sess:
logger.info('load parameters from checkpoint...')
sess.run(tf.global_variables_initializer())
logger.info('freeze...')
tmp_g = tf.graph_util.convert_variables_to_constants(sess, tmp_g, [n.name[:-2] for n in output_tensors])
dtypes = [n.dtype for n in input_tensors]
logger.info('optimize...')
tmp_g = optimize_for_inference(
tmp_g,
[n.name[:-2] for n in input_tensors],
[n.name[:-2] for n in output_tensors],
[dtype.as_datatype_enum for dtype in dtypes],
False)
#tmp_file = tempfile.NamedTemporaryFile('w', delete=True).name
#r = random.randint(1, 1000)
#tmp_file = "./tmp_graph"+str(r)
tmp_file = "./tmp_graph11"
logger.info('write graph to a tmp file: %s' % tmp_file)
with tf.gfile.GFile(tmp_file, 'wb') as f:
f.write(tmp_g.SerializeToString())
return tmp_file
except Exception as e:
logger.error('fail to optimize the graph!')
logger.error(e)
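# A minimal usage sketch: freeze the BERT graph once and print where the temporary graph
# file was written. This assumes a TF 1.x runtime and that the checkpoint/config/vocab
# paths configured in args.py actually exist on the local machine.
if __name__ == "__main__":
    frozen_graph_path = optimize_graph(pooling_strategy=PoolingStrategy.REDUCE_MEAN,
                                       max_seq_len=40)
    print("frozen graph written to:", frozen_graph_path)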
# ==== end of file: pysoftNLP/bert/graph.py (125softNLP-0.0.1 wheel) ====
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==== end of file: pysoftNLP/bert/__init__.py (125softNLP-0.0.1 wheel) ====
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: migration.py
# time: 2:31 下午
import subprocess
import logging
guide = """
╭─────────────────────────────────────────────────────────────────────────╮
│ ◎ ○ ○ ░░░░░░░░░░░░░░░░░░░░░ Important Message ░░░░░░░░░░░░░░░░░░░░░░░░│
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ We renamed again for consistency and clarity. │
│ From now on, it is all `kashgari`. │
│ Changelog: https://github.com/BrikerMan/Kashgari/releases/tag/v1.0.0 │
│ │
│ | Backend | pypi version | desc | │
│ | ---------------- | -------------- | -------------- | │
│ | TensorFlow 2.x | kashgari 2.x.x | coming soon | │
│ | TensorFlow 1.14+ | kashgari 1.x.x | | │
│ | Keras | kashgari 0.x.x | legacy version | │
│ │
╰─────────────────────────────────────────────────────────────────────────╯
"""
def show_migration_guide():
requirements = subprocess.getoutput("pip freeze")
for package in requirements.splitlines():
if '==' in package:
package_name, package_version = package.split('==')
if package_name == 'kashgari-tf':
logging.warning(guide)
if __name__ == "__main__":
show_migration_guide()
print("hello, world")
# ==== end of file: kashgari/migeration.py (125softNLP-0.0.1 wheel) ====
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: macros.py
@time: 2019-05-17 11:38
"""
import os
import logging
from pathlib import Path
import tensorflow as tf
DATA_PATH = os.path.join(str(Path.home()), '.kashgari')
Path(DATA_PATH).mkdir(exist_ok=True, parents=True)
class TaskType(object):
CLASSIFICATION = 'classification'
LABELING = 'labeling'
SCORING = 'scoring'
class Config(object):
def __init__(self):
self._use_cudnn_cell = False
self.disable_auto_summary = False
if tf.test.is_gpu_available(cuda_only=True):
logging.warning("CUDA GPU available, you can set `kashgari.config.use_cudnn_cell = True` to use CuDNNCell. "
"This will speed up the training, "
"but will make model incompatible with CPU device.")
@property
def use_cudnn_cell(self):
return self._use_cudnn_cell
@use_cudnn_cell.setter
def use_cudnn_cell(self, value):
self._use_cudnn_cell = value
from kashgari.layers import L
if value:
if tf.test.is_gpu_available(cuda_only=True):
L.LSTM = tf.compat.v1.keras.layers.CuDNNLSTM
L.GRU = tf.compat.v1.keras.layers.CuDNNGRU
logging.warning("CuDNN enabled, this will speed up the training, "
"but will make model incompatible with CPU device.")
else:
logging.warning("Unable to use CuDNN cell, no GPU available.")
else:
L.LSTM = tf.keras.layers.LSTM
L.GRU = tf.keras.layers.GRU
def to_dict(self):
return {
'use_cudnn_cell': self.use_cudnn_cell
}
config = Config()
if __name__ == "__main__":
print("Hello world")
# ==== end of file: kashgari/macros.py (125softNLP-0.0.1 wheel) ====
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __version__.py.py
# time: 2019-05-20 16:32
__version__ = '1.1.1'
# ==== end of file: kashgari/__version__.py (125softNLP-0.0.1 wheel) ====
# encoding: utf-8
import os
import logging
import pandas as pd
from kashgari import macros as k
from typing import Tuple, List
from tensorflow.python.keras.utils import get_file
from kashgari import utils
CORPUS_PATH = os.path.join(k.DATA_PATH, 'corpus')
class DataReader(object):
@staticmethod
def read_conll_format_file(file_path: str,
text_index: int = 0,
label_index: int = 1) -> Tuple[List[List[str]], List[List[str]]]:
"""
Read conll format data_file
Args:
file_path: path of target file
text_index: index of text data, default 0
label_index: index of label data, default 1
Returns:
"""
x_data, y_data = [], []
with open(file_path, 'r', encoding='utf-8') as f:
lines = f.read().splitlines()
x, y = [], []
for line in lines:
rows = line.split(' ')
if len(rows) == 1:
x_data.append(x)
y_data.append(y)
x = []
y = []
else:
x.append(rows[text_index])
y.append(rows[label_index])
return x_data, y_data
class ChineseDailyNerCorpus(object):
"""
Chinese Daily New New Corpus
https://github.com/zjy-ucas/ChineseNER/
"""
# __corpus_name__ = 'china-people-daily-ner-corpus'
    # raw string so that "\n" in the local path is not interpreted as an escape sequence
    __corpus_name__ = r'D:\pysoftNLP_resources\ner\china-people-daily-ner-corpus'
__zip_file__name = 'http://s3.bmio.net/kashgari/china-people-daily-ner-corpus.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
shuffle: bool = True) -> Tuple[List[List[str]], List[List[str]]]:
"""
Load dataset as sequence labeling format, char level tokenized
features: ``[['海', '钓', '比', '赛', '地', '点', '在', '厦', '门', ...], ...]``
labels: ``[['O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-LOC', 'I-LOC', ...], ...]``
Sample::
train_x, train_y = ChineseDailyNerCorpus.load_data('train')
test_x, test_y = ChineseDailyNerCorpus.load_data('test')
Args:
subset_name: {train, test, valid}
shuffle: should shuffle or not, default True.
Returns:
dataset_features and dataset labels
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
        # override the downloaded location with a local copy of the corpus
        corpus_path = r'D:\pysoftNLP_resources\entity_recognition\china-people-daily-ner-corpus'
        print('Loading corpus from:', corpus_path)
if subset_name == 'train':
file_path = os.path.join(corpus_path, 'example.train')
elif subset_name == 'test':
file_path = os.path.join(corpus_path, 'example.test')
else:
file_path = os.path.join(corpus_path, 'example.dev')
x_data, y_data = DataReader.read_conll_format_file(file_path)
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {file_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
class CONLL2003ENCorpus(object):
__corpus_name__ = 'conll2003_en'
__zip_file__name = 'http://s3.bmio.net/kashgari/conll2003_en.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
task_name: str = 'ner',
shuffle: bool = True) -> Tuple[List[List[str]], List[List[str]]]:
"""
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
if subset_name not in {'train', 'test', 'valid'}:
raise ValueError()
file_path = os.path.join(corpus_path, f'{subset_name}.txt')
if task_name not in {'pos', 'chunking', 'ner'}:
            raise ValueError(f"task_name must be one of 'pos', 'chunking' or 'ner', got {task_name!r}")
data_index = ['pos', 'chunking', 'ner'].index(task_name) + 1
x_data, y_data = DataReader.read_conll_format_file(file_path, label_index=data_index)
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {file_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
class SMP2018ECDTCorpus(object):
"""
https://worksheets.codalab.org/worksheets/0x27203f932f8341b79841d50ce0fd684f/
    This Chinese human-computer dialogue dataset was released for task 1 of the Evaluation of
    Chinese Human-Computer Dialogue Technology (SMP2018-ECDT) and is provided by the iFLYTEK Corporation.
sample::
label query
0 weather 今天东莞天气如何
1 map 从观音桥到重庆市图书馆怎么走
2 cookbook 鸭蛋怎么腌?
3 health 怎么治疗牛皮癣
4 chat 唠什么
"""
__corpus_name__ = 'SMP2018ECDTCorpus'
__zip_file__name = 'http://s3.bmio.net/kashgari/SMP2018ECDTCorpus.tar.gz'
@classmethod
def load_data(cls,
subset_name: str = 'train',
shuffle: bool = True,
cutter: str = 'char') -> Tuple[List[List[str]], List[str]]:
"""
Load dataset as sequence classification format, char level tokenized
features: ``[['听', '新', '闻', '。'], ['电', '视', '台', '在', '播', '什', '么'], ...]``
labels: ``['news', 'epg', ...]``
Samples::
train_x, train_y = SMP2018ECDTCorpus.load_data('train')
test_x, test_y = SMP2018ECDTCorpus.load_data('test')
Args:
subset_name: {train, test, valid}
shuffle: should shuffle or not, default True.
cutter: sentence cutter, {char, jieba}
Returns:
dataset_features and dataset labels
"""
corpus_path = get_file(cls.__corpus_name__,
cls.__zip_file__name,
cache_dir=k.DATA_PATH,
untar=True)
if cutter not in ['char', 'jieba', 'none']:
            raise ValueError("cutter error, please use one of {'char', 'jieba', 'none'}")
df_path = os.path.join(corpus_path, f'{subset_name}.csv')
df = pd.read_csv(df_path)
if cutter == 'jieba':
try:
import jieba
except ModuleNotFoundError:
raise ModuleNotFoundError(
"please install jieba, `$ pip install jieba`")
x_data = [list(jieba.cut(item)) for item in df['query'].to_list()]
        else:  # 'char' and 'none' both fall back to char-level tokens
x_data = [list(item) for item in df['query'].to_list()]
y_data = df['label'].to_list()
if shuffle:
x_data, y_data = utils.unison_shuffled_copies(x_data, y_data)
logging.debug(f"loaded {len(x_data)} samples from {df_path}. Sample:\n"
f"x[0]: {x_data[0]}\n"
f"y[0]: {y_data[0]}")
return x_data, y_data
if __name__ == "__main__":
a, b = CONLL2003ENCorpus.load_data()
print(a[:2])
print(b[:2])
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/corpus.py | corpus.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: callbacks.py
# time: 2019-05-22 15:00
from sklearn import metrics
from kashgari import macros
from tensorflow.python import keras
from kashgari.tasks.base_model import BaseModel
from seqeval import metrics as seq_metrics
class EvalCallBack(keras.callbacks.Callback):
def __init__(self, kash_model: BaseModel, valid_x, valid_y,
step=5, batch_size=256, average='weighted'):
"""
Evaluate callback, calculate precision, recall and f1
Args:
kash_model: the kashgari model to evaluate
valid_x: feature data
valid_y: label data
step: step, default 5
            batch_size: batch size, default 256
            average: averaging strategy passed to the sklearn metrics, default 'weighted'
"""
super(EvalCallBack, self).__init__()
self.kash_model = kash_model
self.valid_x = valid_x
self.valid_y = valid_y
self.step = step
self.batch_size = batch_size
self.average = average
self.logs = []
def on_epoch_end(self, epoch, logs=None):
if (epoch + 1) % self.step == 0:
y_pred = self.kash_model.predict(self.valid_x, batch_size=self.batch_size)
if self.kash_model.task == macros.TaskType.LABELING:
y_true = [seq[:len(y_pred[index])] for index, seq in enumerate(self.valid_y)]
precision = seq_metrics.precision_score(y_true, y_pred)
recall = seq_metrics.recall_score(y_true, y_pred)
f1 = seq_metrics.f1_score(y_true, y_pred)
else:
y_true = self.valid_y
precision = metrics.precision_score(y_true, y_pred, average=self.average)
recall = metrics.recall_score(y_true, y_pred, average=self.average)
f1 = metrics.f1_score(y_true, y_pred, average=self.average)
self.logs.append({
'precision': precision,
'recall': recall,
'f1': f1
})
print(f"\nepoch: {epoch} precision: {precision:.6f}, recall: {recall:.6f}, f1: {f1:.6f}")
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/callbacks.py | callbacks.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: helpers.py
@time: 2019-05-17 11:37
"""
import json
import os
import pathlib
import pydoc
import random
import time
from typing import List, Optional, Dict, Union
import tensorflow as tf
from tensorflow.python import keras, saved_model
from kashgari import custom_objects
from kashgari.embeddings.base_embedding import Embedding
from kashgari.layers.crf import CRF
from kashgari.processors.base_processor import BaseProcessor
from kashgari.tasks.base_model import BaseModel
from kashgari.tasks.classification.base_model import BaseClassificationModel
from kashgari.tasks.labeling.base_model import BaseLabelingModel
def unison_shuffled_copies(a, b):
assert len(a) == len(b)
c = list(zip(a, b))
random.shuffle(c)
a, b = zip(*c)
return list(a), list(b)
def get_list_subset(target: List, index_list: List[int]) -> List:
return [target[i] for i in index_list if i < len(target)]
def custom_object_scope():
return tf.keras.utils.custom_object_scope(custom_objects)
def load_model(model_path: str, load_weights: bool = True) -> Union[BaseClassificationModel, BaseLabelingModel]:
"""
    Load a saved model produced by the `model.save` function
Args:
model_path: model folder path
load_weights: only load model structure and vocabulary when set to False, default True.
Returns:
"""
with open(os.path.join(model_path, 'model_info.json'), 'r') as f:
model_info = json.load(f)
model_class = pydoc.locate(f"{model_info['module']}.{model_info['class_name']}")
model_json_str = json.dumps(model_info['tf_model'])
model = model_class()
model.tf_model = tf.keras.models.model_from_json(model_json_str, custom_objects)
if load_weights:
model.tf_model.load_weights(os.path.join(model_path, 'model_weights.h5'))
embed_info = model_info['embedding']
embed_class = pydoc.locate(f"{embed_info['module']}.{embed_info['class_name']}")
embedding: Embedding = embed_class._load_saved_instance(embed_info,
model_path,
model.tf_model)
model.embedding = embedding
if type(model.tf_model.layers[-1]) == CRF:
model.layer_crf = model.tf_model.layers[-1]
return model
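# Usage sketch (illustrative; the folder below is a placeholder for a directory
# created by `model.save(...)`):
#
#     loaded_model = load_model('./saved_ner_model')
#     loaded_model.predict([list('今天天气怎么样')])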
def load_processor(model_path: str) -> BaseProcessor:
"""
Load processor from model
    When using tf-serving, we need the model's processor to pre-process the data
Args:
model_path:
Returns:
"""
with open(os.path.join(model_path, 'model_info.json'), 'r') as f:
model_info = json.load(f)
processor_info = model_info['embedding']['processor']
processor_class = pydoc.locate(f"{processor_info['module']}.{processor_info['class_name']}")
processor: BaseProcessor = processor_class(**processor_info['config'])
return processor
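# Usage sketch (illustrative): when calling a tf-serving endpoint, pre-process text
# on the client side with the saved processor. The path below is a placeholder.
#
#     processor = load_processor('./saved_ner_model')
#     tensor = processor.process_x_dataset([list('语言模型')])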
def convert_to_saved_model(model: BaseModel,
model_path: str,
version: str = None,
inputs: Optional[Dict] = None,
outputs: Optional[Dict] = None):
"""
Export model for tensorflow serving
Args:
model: Target model
model_path: The path to which the SavedModel will be stored.
version: The model version code, default timestamp
inputs: dict mapping string input names to tensors. These are added
to the SignatureDef as the inputs.
outputs: dict mapping string output names to tensors. These are added
to the SignatureDef as the outputs.
"""
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
if version is None:
version = round(time.time())
export_path = os.path.join(model_path, str(version))
if inputs is None:
inputs = {i.name: i for i in model.tf_model.inputs}
if outputs is None:
outputs = {o.name: o for o in model.tf_model.outputs}
sess = keras.backend.get_session()
saved_model.simple_save(session=sess,
export_dir=export_path,
inputs=inputs,
outputs=outputs)
with open(os.path.join(export_path, 'model_info.json'), 'w') as f:
f.write(json.dumps(model.info(), indent=2, ensure_ascii=True))
f.close()
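# Usage sketch (illustrative; paths and version are placeholders): export a trained
# model for tensorflow serving, producing `<model_path>/<version>/saved_model.pb`
# plus a `model_info.json` describing the model.
#
#     convert_to_saved_model(model, './tf_serving_models/ner', version='1')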
if __name__ == "__main__":
path = '/Users/brikerman/Desktop/python/Kashgari/tests/classification/saved_models/' \
'kashgari.tasks.classification.models/BiLSTM_Model'
p = load_processor(path)
print(p.process_x_dataset([list('语言模型')]))
print(p.label2idx)
print(p.token2idx)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/utils.py | utils.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: migration.py
# time: 2:31 下午
import subprocess
import logging
guide = """
╭─────────────────────────────────────────────────────────────────────────╮
│ ◎ ○ ○ ░░░░░░░░░░░░░░░░░░░░░ Important Message ░░░░░░░░░░░░░░░░░░░░░░░░│
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ We renamed again for consistency and clarity. │
│ From now on, it is all `kashgari`. │
│ Changelog: https://github.com/BrikerMan/Kashgari/releases/tag/v1.0.0 │
│ │
│ | Backend | pypi version | desc | │
│ | ---------------- | -------------- | -------------- | │
│ | TensorFlow 2.x | kashgari 2.x.x | coming soon | │
│ | TensorFlow 1.14+ | kashgari 1.x.x | | │
│ | Keras | kashgari 0.x.x | legacy version | │
│ │
╰─────────────────────────────────────────────────────────────────────────╯
"""
def show_migration_guide():
requirements = subprocess.getoutput("pip freeze")
for package in requirements.splitlines():
if '==' in package:
package_name, package_version = package.split('==')
if package_name == 'kashgari-tf':
logging.warning(guide)
if __name__ == "__main__":
show_migration_guide()
print("hello, world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/migration.py | migration.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: version.py
# time: 2019-05-20 16:32
__version__ = '1.0.0'
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/version.py | version.py |
# encoding: utf-8
"""
@author: BrikerMan
@contact: eliyar917@gmail.com
@blog: https://eliyar.biz
@version: 1.0
@license: Apache Licence
@file: __init__.py
@time: 2019-05-17 11:15
"""
import os
os.environ['TF_KERAS'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import keras_bert
from kashgari.macros import TaskType, config
custom_objects = keras_bert.get_custom_objects()
CLASSIFICATION = TaskType.CLASSIFICATION
LABELING = TaskType.LABELING
SCORING = TaskType.SCORING
from kashgari.__version__ import __version__
from kashgari import layers
from kashgari import corpus
from kashgari import embeddings
from kashgari import macros
from kashgari import processors
from kashgari import tasks
from kashgari import utils
from kashgari import callbacks
from kashgari import migration
migration.show_migration_guide()
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_tokenizer.py
# time: 11:24 上午
class Tokenizer:
"""Abstract base class for all implemented tokenizers.
"""
def tokenize(self, text: str):
"""
Tokenize text into token sequence
Args:
text: target text sample
Returns:
List of tokens in this sample
"""
return text.split(' ')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tokenizer/base_tokenizer.py | base_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bert_tokenizer.py
# time: 11:33 上午
# flake8: noqa: E127
import codecs
import os
import unicodedata
from kashgari.tokenizer.base_tokenizer import Tokenizer
TOKEN_PAD = '' # Token for padding
TOKEN_UNK = '[UNK]' # Token for unknown words
TOKEN_CLS = '[CLS]' # Token for classification
TOKEN_SEP = '[SEP]' # Token for separation
TOKEN_MASK = '[MASK]' # Token for masking
class BertTokenizer(Tokenizer):
"""
Bert Like Tokenizer, ref: https://github.com/CyberZHG/keras-bert/blob/master/keras_bert/tokenizer.py
"""
def __init__(self,
token_dict=None,
token_cls=TOKEN_CLS,
token_sep=TOKEN_SEP,
token_unk=TOKEN_UNK,
pad_index=0,
cased=False):
"""Initialize tokenizer.
:param token_dict: A dict maps tokens to indices.
:param token_cls: The token represents classification.
:param token_sep: The token represents separator.
:param token_unk: The token represents unknown token.
:param pad_index: The index to pad.
:param cased: Whether to keep the case.
"""
self._token_dict = token_dict
if self._token_dict:
self._token_dict_inv = {v: k for k, v in token_dict.items()}
else:
self._token_dict_inv = {}
self._token_cls = token_cls
self._token_sep = token_sep
self._token_unk = token_unk
self._pad_index = pad_index
self._cased = cased
@classmethod
def load_from_model(cls, model_path: str):
dict_path = os.path.join(model_path, 'vocab.txt')
token2idx = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
return BertTokenizer(token_dict=token2idx)
@classmethod
def load_from_vacab_file(cls, vacab_path: str):
token2idx = {}
with codecs.open(vacab_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
return BertTokenizer(token_dict=token2idx)
def tokenize(self, first):
"""Split text to tokens.
        :param first: Text to tokenize.
        :return: A list of string tokens.
"""
tokens = self._tokenize(first)
return tokens
def _tokenize(self, text):
if not self._cased:
text = unicodedata.normalize('NFD', text)
text = ''.join([ch for ch in text if unicodedata.category(ch) != 'Mn'])
text = text.lower()
spaced = ''
for ch in text:
if self._is_punctuation(ch) or self._is_cjk_character(ch):
spaced += ' ' + ch + ' '
elif self._is_space(ch):
spaced += ' '
elif ord(ch) == 0 or ord(ch) == 0xfffd or self._is_control(ch):
continue
else:
spaced += ch
if self._token_dict:
tokens = []
for word in spaced.strip().split():
tokens += self._word_piece_tokenize(word)
return tokens
else:
return spaced.strip().split()
def _word_piece_tokenize(self, word):
if word in self._token_dict:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in self._token_dict:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
@staticmethod
def _is_punctuation(ch): # noqa: E127
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
@staticmethod
def _is_cjk_character(ch):
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
@staticmethod
def _is_space(ch):
return ch == ' ' or ch == '\n' or ch == '\r' or ch == '\t' or unicodedata.category(ch) == 'Zs'
@staticmethod
def _is_control(ch):
return unicodedata.category(ch) in ('Cc', 'Cf')
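# Usage sketch (illustrative; `bert_model_folder` is a placeholder for a BERT checkpoint
# directory containing a `vocab.txt`). The exact word pieces depend on that vocabulary.
#
#     tokenizer = BertTokenizer.load_from_model('bert_model_folder')
#     tokenizer.tokenize('Jim Henson was a puppeteer')
#     # -> e.g. ['jim', 'henson', 'was', 'a', 'puppet', '##eer']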
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tokenizer/bert_tokenizer.py | bert_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: jieba_tokenizer.py
# time: 11:54 上午
from kashgari.tokenizer.base_tokenizer import Tokenizer
class JiebaTokenizer(Tokenizer):
"""Jieba tokenizer
"""
def __init__(self):
try:
import jieba
self._jieba = jieba
except ModuleNotFoundError:
            raise ModuleNotFoundError("Jieba module not found, please install it with `pip install jieba`")
def tokenize(self, text: str, **kwargs):
"""
Tokenize text into token sequence
Args:
text: target text sample
Returns:
List of tokens in this sample
"""
return list(self._jieba.cut(text, **kwargs))
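# Usage sketch (illustrative; requires the optional `jieba` package):
#
#     tokenizer = JiebaTokenizer()
#     tokenizer.tokenize('今天天气不错')
#     # -> e.g. ['今天', '天气', '不错']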
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tokenizer/jieba_tokenizer.py | jieba_tokenizer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 11:34 上午
from kashgari.tokenizer.base_tokenizer import Tokenizer
from kashgari.tokenizer.bert_tokenizer import BertTokenizer
from kashgari.tokenizer.jieba_tokenizer import JiebaTokenizer
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tokenizer/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 2019-05-22 11:21
import os
import json
import logging
import warnings
import pathlib
from typing import Dict, Any, List, Optional, Union, Tuple
import numpy as np
import tensorflow as tf
from tensorflow import keras
import kashgari
from kashgari import utils
from kashgari.embeddings import BareEmbedding
from kashgari.embeddings.base_embedding import Embedding
L = keras.layers
class BaseModel(object):
"""Base Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def info(self):
model_json_str = self.tf_model.to_json()
return {
'config': {
'hyper_parameters': self.hyper_parameters,
},
'tf_model': json.loads(model_json_str),
'embedding': self.embedding.info(),
'class_name': self.__class__.__name__,
'module': self.__class__.__module__,
'tf_version': tf.__version__,
'kashgari_version': kashgari.__version__
}
@property
def task(self):
return self.embedding.task
@property
def token2idx(self) -> Dict[str, int]:
return self.embedding.token2idx
@property
def label2idx(self) -> Dict[str, int]:
return self.embedding.label2idx
@property
def pre_processor(self):
        """Deprecated. Use `self.processor` instead."""
        warnings.warn("The 'pre_processor' property is deprecated, "
                      "use 'processor' instead", DeprecationWarning, 2)
        return self.embedding.processor
@property
def processor(self):
return self.embedding.processor
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
"""
Args:
embedding: model embedding
hyper_parameters: a dict of hyper_parameters.
Examples:
            You could customize hyper_parameters like this::
# get default hyper_parameters
hyper_parameters = BLSTMModel.get_default_hyper_parameters()
# change lstm hidden unit to 12
hyper_parameters['layer_blstm']['units'] = 12
# init new model with customized hyper_parameters
labeling_model = BLSTMModel(hyper_parameters=hyper_parameters)
labeling_model.fit(x, y)
"""
if embedding is None:
self.embedding = BareEmbedding(task=self.__task__)
else:
self.embedding = embedding
self.tf_model: keras.Model = None
self.hyper_parameters = self.get_default_hyper_parameters()
self.model_info = {}
if hyper_parameters:
self.hyper_parameters.update(hyper_parameters)
def build_model(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build model with corpus
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
self.build_model_arc()
self.compile_model()
def build_multi_gpu_model(self,
gpus: int,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
cpu_merge: bool = True,
cpu_relocation: bool = False,
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build multi-GPU model with corpus
Args:
            gpus: Integer >= 2, number of GPUs on which to create model replicas.
cpu_merge: A boolean value to identify whether to force merging model weights
under the scope of the CPU or not.
cpu_relocation: A boolean value to identify whether to create the model's weights
under the scope of the CPU. If the model is not defined under any preceding device
scope, you can still rescue it by activating this option.
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
with utils.custom_object_scope():
self.build_model_arc()
self.tf_model = tf.keras.utils.multi_gpu_model(self.tf_model,
gpus,
cpu_merge=cpu_merge,
cpu_relocation=cpu_relocation)
self.compile_model()
def build_tpu_model(self, strategy: tf.contrib.distribute.TPUStrategy,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None):
"""
Build TPU model with corpus
Args:
strategy: `TPUDistributionStrategy`. The strategy to use for replicating model
across multiple TPU cores.
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
Returns:
"""
if x_validate is not None and not isinstance(x_validate, tuple):
self.embedding.analyze_corpus(x_train + x_validate, y_train + y_validate)
else:
self.embedding.analyze_corpus(x_train, y_train)
if self.tf_model is None:
with utils.custom_object_scope():
self.build_model_arc()
self.tf_model = tf.contrib.tpu.keras_to_tpu_model(self.tf_model, strategy=strategy)
self.compile_model(optimizer=tf.train.AdamOptimizer())
def get_data_generator(self,
x_data,
y_data,
batch_size: int = 64,
shuffle: bool = True):
"""
data generator for fit_generator
Args:
x_data: Array of feature data (if the model has a single input),
or tuple of feature data array (if the model has multiple inputs)
y_data: Array of label data
batch_size: Number of samples per gradient update, default to 64.
shuffle:
Returns:
data generator
"""
index_list = np.arange(len(x_data))
page_count = len(x_data) // batch_size + 1
while True:
if shuffle:
np.random.shuffle(index_list)
for page in range(page_count):
start_index = page * batch_size
end_index = start_index + batch_size
target_index = index_list[start_index: end_index]
if len(target_index) == 0:
target_index = index_list[0: batch_size]
x_tensor = self.embedding.process_x_dataset(x_data,
target_index)
y_tensor = self.embedding.process_y_dataset(y_data,
target_index)
yield (x_tensor, y_tensor)
def fit(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None,
batch_size: int = 64,
epochs: int = 5,
callbacks: List[keras.callbacks.Callback] = None,
fit_kwargs: Dict = None,
shuffle: bool = True):
"""
Trains the model for a given number of epochs with fit_generator (iterations on a dataset).
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
batch_size: Number of samples per gradient update, default to 64.
epochs: Integer. Number of epochs to train the model. default 5.
            callbacks: list of ``keras.callbacks.Callback`` instances to apply during training.
            fit_kwargs: additional arguments passed to ``fit_generator()`` function from
``tensorflow.keras.Model``
- https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#fit_generator
shuffle:
Returns:
"""
self.build_model(x_train, y_train, x_validate, y_validate)
train_generator = self.get_data_generator(x_train,
y_train,
batch_size,
shuffle)
if fit_kwargs is None:
fit_kwargs = {}
validation_generator = None
validation_steps = None
if x_validate:
validation_generator = self.get_data_generator(x_validate,
y_validate,
batch_size,
shuffle)
if isinstance(x_validate, tuple):
validation_steps = len(x_validate[0]) // batch_size + 1
else:
validation_steps = len(x_validate) // batch_size + 1
if isinstance(x_train, tuple):
steps_per_epoch = len(x_train[0]) // batch_size + 1
else:
steps_per_epoch = len(x_train) // batch_size + 1
with utils.custom_object_scope():
return self.tf_model.fit_generator(train_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=callbacks,
**fit_kwargs)
def fit_without_generator(self,
x_train: Union[Tuple[List[List[str]], ...], List[List[str]]],
y_train: Union[List[List[str]], List[str]],
x_validate: Union[Tuple[List[List[str]], ...], List[List[str]]] = None,
y_validate: Union[List[List[str]], List[str]] = None,
batch_size: int = 64,
epochs: int = 5,
callbacks: List[keras.callbacks.Callback] = None,
fit_kwargs: Dict = None):
"""
Trains the model for a given number of epochs (iterations on a dataset).
Args:
x_train: Array of train feature data (if the model has a single input),
or tuple of train feature data array (if the model has multiple inputs)
y_train: Array of train label data
x_validate: Array of validation feature data (if the model has a single input),
or tuple of validation feature data array (if the model has multiple inputs)
y_validate: Array of validation label data
batch_size: Number of samples per gradient update, default to 64.
epochs: Integer. Number of epochs to train the model. default 5.
            callbacks: list of ``keras.callbacks.Callback`` instances to apply during training.
            fit_kwargs: additional arguments passed to the ``fit()`` function of
                ``tensorflow.keras.Model``
                - https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#fit
Returns:
"""
self.build_model(x_train, y_train, x_validate, y_validate)
tensor_x = self.embedding.process_x_dataset(x_train)
tensor_y = self.embedding.process_y_dataset(y_train)
validation_data = None
if x_validate is not None:
tensor_valid_x = self.embedding.process_x_dataset(x_validate)
tensor_valid_y = self.embedding.process_y_dataset(y_validate)
validation_data = (tensor_valid_x, tensor_valid_y)
if fit_kwargs is None:
fit_kwargs = {}
if callbacks and 'callbacks' not in fit_kwargs:
fit_kwargs['callbacks'] = callbacks
with utils.custom_object_scope():
return self.tf_model.fit(tensor_x, tensor_y,
validation_data=validation_data,
epochs=epochs,
batch_size=batch_size,
**fit_kwargs)
def compile_model(self, **kwargs):
"""Configures the model for training.
Using ``compile()`` function of ``tf.keras.Model`` -
https://www.tensorflow.org/api_docs/python/tf/keras/models/Model#compile
Args:
**kwargs: arguments passed to ``compile()`` function of ``tf.keras.Model``
Defaults:
- loss: ``categorical_crossentropy``
- optimizer: ``adam``
- metrics: ``['accuracy']``
"""
if kwargs.get('loss') is None:
kwargs['loss'] = 'categorical_crossentropy'
if kwargs.get('optimizer') is None:
kwargs['optimizer'] = 'adam'
if kwargs.get('metrics') is None:
kwargs['metrics'] = ['accuracy']
self.tf_model.compile(**kwargs)
if not kashgari.config.disable_auto_summary:
self.tf_model.summary()
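    # Usage sketch (illustrative): any compile argument can be overridden before
    # training, e.g. swapping the optimizer while keeping the default loss/metrics.
    #
    #     model.compile_model(optimizer='rmsprop')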
def predict(self,
x_data,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
if predict_kwargs is None:
predict_kwargs = {}
with utils.custom_object_scope():
if isinstance(x_data, tuple):
lengths = [len(sen) for sen in x_data[0]]
else:
lengths = [len(sen) for sen in x_data]
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
if self.task == 'scoring':
t_pred = pred
else:
t_pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(t_pred,
lengths)
if debug_info:
print('input: {}'.format(tensor))
print('output: {}'.format(pred))
print('output argmax: {}'.format(t_pred))
return res
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
debug_info=False) -> Tuple[float, float, Dict]:
"""
Evaluate model
Args:
x_data:
y_data:
batch_size:
digits:
debug_info:
Returns:
"""
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def save(self, model_path: str):
"""
Save model
Args:
model_path:
Returns:
"""
pathlib.Path(model_path).mkdir(exist_ok=True, parents=True)
with open(os.path.join(model_path, 'model_info.json'), 'w') as f:
f.write(json.dumps(self.info(), indent=2, ensure_ascii=True))
f.close()
self.tf_model.save_weights(os.path.join(model_path, 'model_weights.h5'))
logging.info('model saved to {}'.format(os.path.abspath(model_path)))
if __name__ == "__main__":
from kashgari.tasks.labeling import CNN_LSTM_Model
from kashgari.corpus import ChineseDailyNerCorpus
train_x, train_y = ChineseDailyNerCorpus.load_data('valid')
model = CNN_LSTM_Model()
model.build_model(train_x[:100], train_y[:100])
r = model.predict_entities(train_x[:5])
model.save('./res')
import pprint
pprint.pprint(r)
model.evaluate(train_x[:20], train_y[:20])
print("Hello world")
print(model.predict(train_x[:20]))
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-20 11:34
from kashgari.tasks import labeling
from kashgari.tasks import classification
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 2019-05-20 13:07
from typing import Dict, Any, Tuple
import random
import logging
from seqeval.metrics import classification_report
from seqeval.metrics.sequence_labeling import get_entities
from kashgari.tasks.base_model import BaseModel
class BaseLabelingModel(BaseModel):
"""Base Sequence Labeling Model"""
__task__ = 'labeling'
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def predict_entities(self,
x_data,
batch_size=None,
join_chunk=' ',
debug_info=False,
predict_kwargs: Dict = None):
"""Gets entities from sequence.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
            join_chunk: str used to join the tokens of each entity, or ``False`` to return the raw token slice.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
            list of dicts, one per sample, each containing ``text``, ``text_raw`` and ``labels``.
"""
if isinstance(x_data, tuple):
text_seq = x_data[0]
else:
text_seq = x_data
res = self.predict(x_data, batch_size, debug_info, predict_kwargs)
new_res = [get_entities(seq) for seq in res]
final_res = []
for index, seq in enumerate(new_res):
seq_data = []
for entity in seq:
if join_chunk is False:
                    value = text_seq[index][entity[1]:entity[2] + 1]
else:
value = join_chunk.join(text_seq[index][entity[1]:entity[2] + 1])
seq_data.append({
"entity": entity[0],
"start": entity[1],
"end": entity[2],
"value": value,
})
final_res.append({
'text': join_chunk.join(text_seq[index]),
'text_raw': text_seq[index],
'labels': seq_data
})
return final_res
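    # Usage sketch (illustrative): shape of the value returned by `predict_entities`.
    # The extracted spans depend entirely on the trained model.
    #
    #     model.predict_entities([list('小明住在北京')])
    #     # -> [{'text': '小 明 住 在 北 京',
    #     #      'text_raw': ['小', '明', '住', '在', '北', '京'],
    #     #      'labels': [{'entity': 'LOC', 'start': 4, 'end': 5, 'value': '北 京'}]}]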
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
debug_info=False) -> Tuple[float, float, Dict]:
"""
Build a text report showing the main classification metrics.
Args:
x_data:
y_data:
batch_size:
digits:
debug_info:
Returns:
"""
y_pred = self.predict(x_data, batch_size=batch_size)
y_true = [seq[:len(y_pred[index])] for index, seq in enumerate(y_data)]
new_y_pred = []
for x in y_pred:
new_y_pred.append([str(i) for i in x])
new_y_true = []
for x in y_true:
new_y_true.append([str(i) for i in x])
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y_true : {}'.format(y_true[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
        report = classification_report(new_y_true, new_y_pred, digits=digits)
        print(report)
        return report
def build_model_arc(self):
raise NotImplementedError
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
from kashgari.tasks.labeling import BiLSTM_Model
from kashgari.corpus import ChineseDailyNerCorpus
from kashgari.utils import load_model
train_x, train_y = ChineseDailyNerCorpus.load_data('train', shuffle=False)
valid_x, valid_y = ChineseDailyNerCorpus.load_data('valid')
train_x, train_y = train_x[:5120], train_y[:5120]
model = load_model('/Users/brikerman/Desktop/blstm_model')
# model.build_model(train_x[:100], train_y[:100])
# model.fit(train_x[:1000], train_y[:1000], epochs=10)
# model.evaluate(train_x[:20], train_y[:20])
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/labeling/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: experimental.py
# time: 2019-05-22 19:35
from typing import Dict, Any
from tensorflow import keras
import kashgari
from kashgari.tasks.labeling.base_model import BaseLabelingModel
from kashgari.layers import L
from keras_self_attention import SeqSelfAttention
class BLSTMAttentionModel(BaseLabelingModel):
"""Bidirectional LSTM Self Attention Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 64,
'return_sequences': True
},
'layer_self_attention': {
'attention_activation': 'sigmoid'
},
'layer_dropout': {
'rate': 0.5
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        build model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_self_attention = SeqSelfAttention(**config['layer_self_attention'],
name='layer_self_attention')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_self_attention(tensor)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
# Register custom layer
kashgari.custom_objects['SeqSelfAttention'] = SeqSelfAttention
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/labeling/experimental.py | experimental.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-20 11:34
from kashgari.tasks.labeling.models import CNN_LSTM_Model
from kashgari.tasks.labeling.models import BiLSTM_Model
from kashgari.tasks.labeling.models import BiLSTM_CRF_Model
from kashgari.tasks.labeling.models import BiGRU_Model
from kashgari.tasks.labeling.models import BiGRU_CRF_Model
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/labeling/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 2019-05-20 11:13
import logging
from typing import Dict, Any
from tensorflow import keras
from kashgari.tasks.labeling.base_model import BaseLabelingModel
from kashgari.layers import L
from kashgari.layers.crf import CRF
from kashgari.utils import custom_objects
custom_objects['CRF'] = CRF
class BiLSTM_Model(BaseLabelingModel):
"""Bidirectional LSTM Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        build model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
class BiLSTM_CRF_Model(BaseLabelingModel):
"""Bidirectional LSTM CRF Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_blstm': {
'units': 128,
'return_sequences': True
},
'layer_dense': {
'units': 64,
'activation': 'tanh'
}
}
def build_model_arc(self):
"""
        build model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.LSTM(**config['layer_blstm']),
name='layer_blstm')
layer_dense = L.Dense(**config['layer_dense'], name='layer_dense')
layer_crf_dense = L.Dense(output_dim, name='layer_crf_dense')
layer_crf = CRF(output_dim, name='layer_crf')
tensor = layer_blstm(embed_model.output)
tensor = layer_dense(tensor)
tensor = layer_crf_dense(tensor)
output_tensor = layer_crf(tensor)
self.layer_crf = layer_crf
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = self.layer_crf.loss
if kwargs.get('metrics') is None:
kwargs['metrics'] = [self.layer_crf.viterbi_accuracy]
super(BiLSTM_CRF_Model, self).compile_model(**kwargs)
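# Usage sketch (illustrative; train_x / train_y are assumed to exist): customize a
# layer's hyper parameters before training, mirroring the pattern documented on
# BaseModel.__init__.
#
#     hyper = BiLSTM_CRF_Model.get_default_hyper_parameters()
#     hyper['layer_blstm']['units'] = 64
#     model = BiLSTM_CRF_Model(hyper_parameters=hyper)
#     model.fit(train_x, train_y)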
class BiGRU_Model(BaseLabelingModel):
"""Bidirectional GRU Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_bgru': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        build model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.GRU(**config['layer_bgru']),
name='layer_bgru')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_blstm(embed_model.output)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
class BiGRU_CRF_Model(BaseLabelingModel):
"""Bidirectional GRU CRF Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_bgru': {
'units': 128,
'return_sequences': True
},
'layer_dense': {
'units': 64,
'activation': 'tanh'
}
}
def build_model_arc(self):
"""
        build model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_blstm = L.Bidirectional(L.GRU(**config['layer_bgru']),
name='layer_bgru')
layer_dense = L.Dense(**config['layer_dense'], name='layer_dense')
layer_crf_dense = L.Dense(output_dim, name='layer_crf_dense')
layer_crf = CRF(output_dim, name='layer_crf')
tensor = layer_blstm(embed_model.output)
tensor = layer_dense(tensor)
tensor = layer_crf_dense(tensor)
output_tensor = layer_crf(tensor)
self.layer_crf = layer_crf
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = self.layer_crf.loss
if kwargs.get('metrics') is None:
kwargs['metrics'] = [self.layer_crf.viterbi_accuracy]
super(BiGRU_CRF_Model, self).compile_model(**kwargs)
class CNN_LSTM_Model(BaseLabelingModel):
"""CNN LSTM Sequence Labeling Model"""
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
"""
Get hyper parameters of model
Returns:
hyper parameters dict
"""
return {
'layer_conv': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'layer_lstm': {
'units': 128,
'return_sequences': True
},
'layer_dropout': {
'rate': 0.4
},
'layer_time_distributed': {},
'layer_activation': {
'activation': 'softmax'
}
}
def build_model_arc(self):
"""
        build model architecture
"""
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_conv = L.Conv1D(**config['layer_conv'],
name='layer_conv')
layer_lstm = L.LSTM(**config['layer_lstm'],
name='layer_lstm')
layer_dropout = L.Dropout(**config['layer_dropout'],
name='layer_dropout')
layer_time_distributed = L.TimeDistributed(L.Dense(output_dim,
**config['layer_time_distributed']),
name='layer_time_distributed')
layer_activation = L.Activation(**config['layer_activation'])
tensor = layer_conv(embed_model.output)
tensor = layer_lstm(tensor)
tensor = layer_dropout(tensor)
tensor = layer_time_distributed(tensor)
output_tensor = layer_activation(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
from kashgari.corpus import ChineseDailyNerCorpus
valid_x, valid_y = ChineseDailyNerCorpus.load_data('train')
model = BiLSTM_CRF_Model()
model.fit(valid_x, valid_y, epochs=50, batch_size=64)
model.evaluate(valid_x, valid_y)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/labeling/models.py | models.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_classification_model.py
# time: 2019-05-22 11:23
import random
import logging
import kashgari
from typing import Dict, Any, Tuple, Optional, List
from kashgari.tasks.base_model import BaseModel, BareEmbedding
from kashgari.embeddings.base_embedding import Embedding
from sklearn import metrics
class BaseClassificationModel(BaseModel):
__task__ = 'classification'
def __init__(self,
embedding: Optional[Embedding] = None,
hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
super(BaseClassificationModel, self).__init__(embedding, hyper_parameters)
if hyper_parameters is None and \
self.embedding.processor.__getattribute__('multi_label') is True:
last_layer_name = list(self.hyper_parameters.keys())[-1]
self.hyper_parameters[last_layer_name]['activation'] = 'sigmoid'
logging.warning("Activation Layer's activate function changed to sigmoid for"
" multi-label classification question")
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def build_model_arc(self):
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None and self.embedding.processor.multi_label:
kwargs['loss'] = 'binary_crossentropy'
super(BaseClassificationModel, self).compile_model(**kwargs)
def predict(self,
x_data,
batch_size=32,
multi_label_threshold: float = 0.5,
debug_info=False,
predict_kwargs: Dict = None):
"""
Generates output predictions for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
            multi_label_threshold: probability threshold used to binarize multi-label predictions, default 0.5.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
"""
        if predict_kwargs is None:
            predict_kwargs = {}
        with kashgari.utils.custom_object_scope():
            tensor = self.embedding.process_x_dataset(x_data)
            pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
if self.embedding.processor.multi_label:
if debug_info:
logging.info('raw output: {}'.format(pred))
pred[pred >= multi_label_threshold] = 1
pred[pred < multi_label_threshold] = 0
else:
pred = pred.argmax(-1)
res = self.embedding.reverse_numerize_label_sequences(pred)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return res
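    # Usage sketch (illustrative): for a multi-label classifier, lowering the threshold
    # keeps more candidate labels per sample.
    #
    #     model.predict(test_x[:3], multi_label_threshold=0.3)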
def predict_top_k_class(self,
x_data,
top_k=5,
batch_size=32,
debug_info=False,
predict_kwargs: Dict = None) -> List[Dict]:
"""
Generates output predictions with confidence for the input samples.
Computation is done in batches.
Args:
x_data: The input data, as a Numpy array (or list of Numpy arrays if the model has multiple inputs).
top_k: int
batch_size: Integer. If unspecified, it will default to 32.
debug_info: Bool, Should print out the logging info.
predict_kwargs: arguments passed to ``predict()`` function of ``tf.keras.Model``
Returns:
array(s) of predictions.
single-label classification:
[
{
"label": "chat",
"confidence": 0.5801531,
"candidates": [
{ "label": "cookbook", "confidence": 0.1886314 },
{ "label": "video", "confidence": 0.13805099 },
{ "label": "health", "confidence": 0.013852648 },
{ "label": "translation", "confidence": 0.012913573 }
]
}
]
multi-label classification:
[
{
"candidates": [
{ "confidence": 0.9959336, "label": "toxic" },
{ "confidence": 0.9358089, "label": "obscene" },
{ "confidence": 0.6882098, "label": "insult" },
{ "confidence": 0.13540423, "label": "severe_toxic" },
{ "confidence": 0.017219543, "label": "identity_hate" }
]
}
]
"""
if predict_kwargs is None:
predict_kwargs = {}
with kashgari.utils.custom_object_scope():
tensor = self.embedding.process_x_dataset(x_data)
pred = self.tf_model.predict(tensor, batch_size=batch_size, **predict_kwargs)
new_results = []
for sample_prob in pred:
sample_res = zip(self.label2idx.keys(), sample_prob)
sample_res = sorted(sample_res, key=lambda k: k[1], reverse=True)
data = {}
for label, confidence in sample_res[:top_k]:
if 'candidates' not in data:
if self.embedding.processor.multi_label:
data['candidates'] = []
else:
data['label'] = label
data['confidence'] = confidence
data['candidates'] = []
continue
data['candidates'].append({
'label': label,
'confidence': confidence
})
new_results.append(data)
if debug_info:
logging.info('input: {}'.format(tensor))
logging.info('output: {}'.format(pred))
logging.info('output argmax: {}'.format(pred.argmax(-1)))
return new_results
def evaluate(self,
x_data,
y_data,
batch_size=None,
digits=4,
output_dict=False,
debug_info=False) -> Optional[Tuple[float, float, Dict]]:
y_pred = self.predict(x_data, batch_size=batch_size)
if debug_info:
for index in random.sample(list(range(len(x_data))), 5):
logging.debug('------ sample {} ------'.format(index))
logging.debug('x : {}'.format(x_data[index]))
logging.debug('y : {}'.format(y_data[index]))
logging.debug('y_pred : {}'.format(y_pred[index]))
if self.processor.multi_label:
            y_true_b = self.processor.multi_label_binarizer.fit_transform(y_data)
            y_pred_b = self.processor.multi_label_binarizer.transform(y_pred)
            report = metrics.classification_report(y_true_b,
                                                   y_pred_b,
target_names=self.processor.multi_label_binarizer.classes_,
output_dict=output_dict,
digits=digits)
else:
report = metrics.classification_report(y_data,
y_pred,
output_dict=output_dict,
digits=digits)
if not output_dict:
print(report)
else:
return report
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/classification/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-22 12:40
from kashgari.tasks.classification.models import BiLSTM_Model
from kashgari.tasks.classification.models import BiGRU_Model
from kashgari.tasks.classification.models import CNN_Model
from kashgari.tasks.classification.models import CNN_LSTM_Model
from kashgari.tasks.classification.models import CNN_GRU_Model
from kashgari.tasks.classification.models import AVCNN_Model
from kashgari.tasks.classification.models import KMax_CNN_Model
from kashgari.tasks.classification.models import R_CNN_Model
from kashgari.tasks.classification.models import AVRNN_Model
from kashgari.tasks.classification.models import Dropout_BiGRU_Model
from kashgari.tasks.classification.models import Dropout_AVRNN_Model
from kashgari.tasks.classification.dpcnn_model import DPCNN_Model
BLSTMModel = BiLSTM_Model
BGRUModel = BiGRU_Model
CNNModel = CNN_Model
CNNLSTMModel = CNN_LSTM_Model
CNNGRUModel = CNN_GRU_Model
AVCNNModel = AVCNN_Model
KMaxCNNModel = KMax_CNN_Model
RCNNModel = R_CNN_Model
AVRNNModel = AVRNN_Model
DropoutBGRUModel = Dropout_BiGRU_Model
DropoutAVRNNModel = Dropout_AVRNN_Model
DPCNN = DPCNN_Model
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/classification/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 2019-05-22 11:26
import logging
import tensorflow as tf
from typing import Dict, Any
from kashgari.layers import L, AttentionWeightedAverageLayer, KMaxPoolingLayer
from kashgari.tasks.classification.base_model import BaseClassificationModel
class BiLSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_lstm(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_gru': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_gru = L.Bidirectional(L.GRU(**config['layer_bi_gru']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_gru(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv1d_layer': {
'filters': 128,
'kernel_size': 5,
'activation': 'relu'
},
'max_pool_layer': {},
'dense_layer': {
'units': 64,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
        # build the model structure sequentially
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv1d_layer']))
layers_seq.append(L.GlobalMaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.Dense(**config['dense_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class CNN_LSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'lstm_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.LSTM(**config['lstm_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class CNN_GRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'gru_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.GRU(**config['gru_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class AVCNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'conv_0': {
'filters': 300,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 300,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 300,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 300,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
# ---
'attn_0': {},
'avg_0': {},
'maxpool_0': {},
# ---
'maxpool_1': {},
'attn_1': {},
'avg_1': {},
# ---
'maxpool_2': {},
'attn_2': {},
'avg_2': {},
# ---
'maxpool_3': {},
'attn_3': {},
'avg_3': {},
# ---
'v_col3': {
# 'mode': 'concat',
'axis': 1
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.7
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_view = L.Concatenate(**config['v_col3'])
layer_allviews = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
tensors_matrix_sensor = []
for tensor_conv in tensors_conv:
tensor_sensors = []
tensor_sensors = [layer_sensor(tensor_conv) for layer_sensor in layers_sensor]
# tensor_sensors.append(L.GlobalMaxPooling1D()(tensor_conv))
# tensor_sensors.append(AttentionWeightedAverageLayer()(tensor_conv))
# tensor_sensors.append(L.GlobalAveragePooling1D()(tensor_conv))
tensors_matrix_sensor.append(tensor_sensors)
tensors_views = [layer_view(list(tensors)) for tensors in zip(*tensors_matrix_sensor)]
tensor = layer_allviews(tensors_views)
# tensors_v_cols = [L.concatenate(tensors, **config['v_col3']) for tensors
# in zip(*tensors_matrix_sensor)]
# tensor = L.concatenate(tensors_v_cols, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class KMax_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'conv_0': {
'filters': 180,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 180,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 180,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 180,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'maxpool_i4': {
'k': 3
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.6
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = [KMaxPoolingLayer(**config['maxpool_i4']),
L.Flatten()]
layer_concat = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
tensors_sensor = []
for tensor_conv in tensors_conv:
tensor_sensor = tensor_conv
for layer_sensor in layers_sensor:
tensor_sensor = layer_sensor(tensor_sensor)
tensors_sensor.append(tensor_sensor)
tensor = layer_concat(tensors_sensor)
# tensor = L.concatenate(tensors_sensor, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class R_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'conv_0': {
'filters': 128,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu',
'strides': 1
},
'maxpool': {},
'attn': {},
'average': {},
'concat': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 120,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rcnn_seq = []
layers_rcnn_seq.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rcnn_seq.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rcnn_seq.append(L.Conv1D(**config['conv_0']))
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_concat = L.Concatenate(**config['concat'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_rcnn_seq:
tensor = layer(tensor)
tensors_sensor = [layer(tensor) for layer in layers_sensor]
tensor_output = layer_concat(tensors_sensor)
# tensor_output = L.concatenate(tensor_sensors, **config['concat'])
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 60,
'return_sequences': True
},
'rnn_1': {
'units': 60,
'return_sequences': True
},
'concat_rnn': {
'axis': 2
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn0 = []
layers_rnn0.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn0.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layer_bi_rnn1 = L.Bidirectional(L.GRU(**config['rnn_1']))
layer_concat = L.Concatenate(**config['concat_rnn'])
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn0:
tensor_rnn = layer(tensor_rnn)
tensor_concat = layer_concat([tensor_rnn, layer_bi_rnn1(tensor_rnn)])
tensor_sensors = [layer(tensor_concat) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.15
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'dropout_rnn': {
'rate': 0.35
},
'rnn_1': {
'units': 64,
'return_sequences': True
},
'last': {},
'maxpool': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 72,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.Dropout(**config['dropout_rnn']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 56,
'return_sequences': True
},
'rnn_dropout': {
'rate': 0.3
},
'rnn_1': {
'units': 56,
'return_sequences': True
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout_0': {
'rate': 0.5
},
'dense': {
'units': 128,
'activation': 'relu'
},
'dropout_1': {
'rate': 0.25
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.SpatialDropout1D(**config['rnn_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout_0']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dropout(**config['dropout_1']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
if __name__ == "__main__":
print(BiLSTM_Model.get_default_hyper_parameters())
logging.basicConfig(level=logging.DEBUG)
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
import kashgari
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.embeddings import BareEmbedding
processor = ClassificationProcessor(multi_label=False)
embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=30, processor=processor)
m = BiLSTM_Model(embed)
# m.build_model(x, y)
m.fit(x, y, epochs=2)
print(m.predict(x[:10]))
# m.evaluate(x, y)
print(m.predict_top_k_class(x[:10]))
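    # Hedged sketch (assumption: the base model constructor accepts a
    # `hyper_parameters` dict mirroring `get_default_hyper_parameters()`).
    # Every model above follows the same pattern: the nested dict keys map onto
    # the keyword arguments of the Keras layers created in `build_model_arc()`,
    # so tweaking a value here reconfigures the corresponding layer.
    hyper = CNN_LSTM_Model.get_default_hyper_parameters()
    hyper['lstm_layer']['units'] = 32      # smaller LSTM than the default 100 units
    hyper['conv_layer']['filters'] = 64    # wider convolution than the default 32 filters
    embed_cnn = BareEmbedding(task=kashgari.CLASSIFICATION,
                              sequence_length=30,
                              processor=ClassificationProcessor(multi_label=False))
    cnn_lstm = CNN_LSTM_Model(embed_cnn, hyper_parameters=hyper)
    cnn_lstm.fit(x, y, epochs=1)
    print(cnn_lstm.predict(x[:5]))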
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/classification/models.py | models.py |
# encoding: utf-8
# author: Alex
# contact: ialexwwang@gmail.com
# version: 0.1
# license: Apache Licence
# file: dpcnn_model.py
# time: 2019-07-02 19:15
# Reference:
# https://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf
# https://github.com/Cheneng/DPCNN
# https://github.com/miracleyoo/DPCNN-TextCNN-Pytorch-Inception
# https://www.kaggle.com/michaelsnell/conv1d-dpcnn-in-keras
from math import log2, floor
from typing import Dict, Any
import tensorflow as tf
from kashgari.layers import L, KMaxPoolingLayer
from kashgari.tasks.classification.base_model import BaseClassificationModel
class DPCNN_Model(BaseClassificationModel):
'''
    This implementation of DPCNN requires an explicitly declared sequence length,
    so input sequences should be padded or truncated to that length in advance.
'''
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
pool_type = 'max'
filters = 250
activation = 'linear'
return {
'region_embedding': {
'filters': filters,
'kernel_size': 3,
'strides': 1,
'padding': 'same',
'activation': activation,
'name': 'region_embedding',
},
'region_dropout': {
'rate': 0.2,
},
'conv_block': {
'filters': filters,
'kernel_size': 3,
'activation': activation,
'shortcut': True,
},
'resnet_block': {
'filters': filters,
'kernel_size': 3,
'activation': activation,
'shortcut': True,
'pool_type': pool_type,
'sorted': True,
},
'dense': {
'units': 256,
'activation': activation,
},
'dropout': {
'rate': 0.5,
},
'activation': {
'activation': 'softmax',
}
}
def downsample(self, inputs, pool_type: str = 'max',
sorted: bool = True, stage: int = 1): # noqa: A002
layers_pool = []
if pool_type == 'max':
layers_pool.append(
L.MaxPooling1D(pool_size=3,
strides=2,
padding='same',
name=f'pool_{stage}'))
elif pool_type == 'k_max':
k = int(inputs.shape[1].value / 2)
layers_pool.append(
KMaxPoolingLayer(k=k,
sorted=sorted,
name=f'pool_{stage}'))
elif pool_type == 'conv':
layers_pool.append(
L.Conv1D(filters=inputs.shape[-1].value,
kernel_size=3,
strides=2,
padding='same',
name=f'pool_{stage}'))
layers_pool.append(
L.BatchNormalization())
elif pool_type is None:
layers_pool = []
else:
raise ValueError(f'unsupported pooling type `{pool_type}`!')
tensor_out = inputs
for layer in layers_pool:
tensor_out = layer(tensor_out)
return tensor_out
def conv_block(self, inputs, filters: int, kernel_size: int = 3,
activation: str = 'linear', shortcut: bool = True):
layers_conv_unit = []
layers_conv_unit.append(
L.BatchNormalization())
layers_conv_unit.append(
L.PReLU())
layers_conv_unit.append(
L.Conv1D(filters=filters,
kernel_size=kernel_size,
strides=1,
padding='same',
activation=activation))
layers_conv_block = layers_conv_unit * 2
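        # Note: `layers_conv_unit * 2` repeats the same layer objects, so the two
        # convolution passes in this block share weights instead of holding
        # independent parameters.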
tensor_out = inputs
for layer in layers_conv_block:
tensor_out = layer(tensor_out)
if shortcut:
tensor_out = L.Add()([inputs, tensor_out])
return tensor_out
def resnet_block(self, inputs, filters: int, kernel_size: int = 3,
activation: str = 'linear', shortcut: bool = True,
pool_type: str = 'max', sorted: bool = True, stage: int = 1): # noqa: A002
tensor_pool = self.downsample(inputs, pool_type=pool_type, sorted=sorted, stage=stage)
tensor_out = self.conv_block(tensor_pool, filters=filters, kernel_size=kernel_size,
activation=activation, shortcut=shortcut)
return tensor_out
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_region = [
L.Conv1D(**config['region_embedding']),
L.BatchNormalization(),
L.PReLU(),
L.Dropout(**config['region_dropout'])
]
layers_main = [
L.GlobalMaxPooling1D(),
L.Dense(**config['dense']),
L.BatchNormalization(),
L.PReLU(),
L.Dropout(**config['dropout']),
L.Dense(output_dim, **config['activation'])
]
tensor_out = embed_model.output
# build region tensors
for layer in layers_region:
tensor_out = layer(tensor_out)
# build the base pyramid layer
tensor_out = self.conv_block(tensor_out, **config['conv_block'])
# build the above pyramid layers while `steps > 2`
seq_len = tensor_out.shape[1].value
if seq_len is None:
raise ValueError('`sequence_length` should be explicitly assigned, but it is `None`.')
for i in range(floor(log2(seq_len)) - 2):
tensor_out = self.resnet_block(tensor_out, stage=i + 1,
**config['resnet_block'])
for layer in layers_main:
tensor_out = layer(tensor_out)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_out)
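if __name__ == "__main__":
    # Hedged usage sketch: DPCNN needs a fixed, explicitly declared sequence length
    # (see the class docstring), so the embedding below is created with
    # `sequence_length=64` instead of 'auto' or 'variable'. The corpus and the
    # bare embedding are illustrative assumptions, not requirements of the model.
    import kashgari
    from kashgari.corpus import SMP2018ECDTCorpus
    from kashgari.embeddings import BareEmbedding
    x, y = SMP2018ECDTCorpus.load_data('valid')
    embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=64)
    model = DPCNN_Model(embed)
    model.fit(x, y, epochs=1)
    print(model.predict(x[:5]))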
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/classification/dpcnn_model.py | dpcnn_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_model.py
# time: 11:36 上午
from typing import Callable
from typing import Dict, Any
import numpy as np
from sklearn import metrics
from kashgari.tasks.base_model import BaseModel
class BaseScoringModel(BaseModel):
"""Base Sequence Labeling Model"""
__task__ = 'scoring'
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
raise NotImplementedError
def compile_model(self, **kwargs):
if kwargs.get('loss') is None:
kwargs['loss'] = 'mse'
if kwargs.get('optimizer') is None:
kwargs['optimizer'] = 'rmsprop'
if kwargs.get('metrics') is None:
kwargs['metrics'] = ['mae']
super(BaseScoringModel, self).compile_model(**kwargs)
def evaluate(self,
x_data,
y_data,
batch_size=None,
should_round: bool = False,
round_func: Callable = None,
digits=4,
debug_info=False) -> Dict:
"""
        Build a text report showing the main evaluation metrics (a classification-style
        report when rounding, regression metrics otherwise).
Args:
x_data:
y_data:
batch_size:
should_round:
round_func:
digits:
debug_info:
Returns:
"""
y_pred = self.predict(x_data, batch_size=batch_size)
if should_round:
if round_func is None:
round_func = np.round
print(self.processor.output_dim)
if self.processor.output_dim != 1:
                raise ValueError('Evaluating with a round function only accepts 1-D output')
y_pred = [round_func(i) for i in y_pred]
report = metrics.classification_report(y_data,
y_pred,
digits=digits)
report_dic = metrics.classification_report(y_data,
y_pred,
output_dict=True,
digits=digits)
print(report)
else:
mean_squared_error = metrics.mean_squared_error(y_data, y_pred)
r2_score = metrics.r2_score(y_data, y_pred)
report_dic = {
'mean_squared_error': mean_squared_error,
'r2_score': r2_score
}
print(f"mean_squared_error : {mean_squared_error}\n"
f"r2_score : {r2_score}")
return report_dic
if __name__ == "__main__":
pass
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/scoring/base_model.py | base_model.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 11:36 上午
from kashgari.tasks.scoring.models import BiLSTM_Model
if __name__ == "__main__":
pass
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/scoring/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: models.py
# time: 11:38 上午
import logging
from typing import Dict, Any
from tensorflow import keras
from kashgari.tasks.scoring.base_model import BaseScoringModel
from kashgari.layers import L
class BiLSTM_Model(BaseScoringModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'linear'
}
}
def build_model_arc(self):
output_dim = self.processor.output_dim
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_lstm(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = keras.Model(embed_model.inputs, output_tensor)
if __name__ == "__main__":
from kashgari.corpus import SMP2018ECDTCorpus
import numpy as np
x, y = SMP2018ECDTCorpus.load_data('valid')
y = np.random.random((len(x), 4))
model = BiLSTM_Model()
model.fit(x, y)
print(model.predict(x[:10]))
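    # Hedged sketch: BaseScoringModel.evaluate returns regression metrics
    # (mean squared error and R^2) by default; `should_round=True` is only
    # valid for 1-D targets, so with this 4-dimensional random target we keep
    # the default mode.
    report = model.evaluate(x, y)
    print(report)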
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/tasks/scoring/models.py | models.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-20 17:40
import json
import logging
import pydoc
from typing import Union, List, Optional, Dict
import numpy as np
from tensorflow import keras
import kashgari
from kashgari.processors import ClassificationProcessor, LabelingProcessor, ScoringProcessor
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
class Embedding(object):
"""Base class for Embedding Model"""
def info(self) -> Dict:
return {
'processor': self.processor.info(),
'class_name': self.__class__.__name__,
'module': self.__class__.__module__,
'config': {
'sequence_length': self.sequence_length,
'embedding_size': self.embedding_size,
'task': self.task
},
'embed_model': json.loads(self.embed_model.to_json()),
}
@classmethod
def _load_saved_instance(cls,
config_dict: Dict,
model_path: str,
tf_model: keras.Model):
processor_info = config_dict['processor']
processor_class = pydoc.locate(f"{processor_info['module']}.{processor_info['class_name']}")
processor = processor_class(**processor_info['config'])
instance = cls(processor=processor,
from_saved_model=True, **config_dict['config'])
embed_model_json_str = json.dumps(config_dict['embed_model'])
instance.embed_model = keras.models.model_from_json(embed_model_json_str,
custom_objects=kashgari.custom_objects)
# Load Weights from model
for layer in instance.embed_model.layers:
layer.set_weights(tf_model.get_layer(layer.name).get_weights())
return instance
def __init__(self,
task: str = None,
sequence_length: Union[int, str] = 'auto',
embedding_size: int = 100,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
self.task = task
self.embedding_size = embedding_size
if processor is None:
if task == kashgari.CLASSIFICATION:
self.processor = ClassificationProcessor()
elif task == kashgari.LABELING:
self.processor = LabelingProcessor()
elif task == kashgari.SCORING:
self.processor = ScoringProcessor()
else:
                raise ValueError('Need to set the `processor` param or a valid `task` value: {labeling, classification, scoring}')
else:
self.processor = processor
self.sequence_length: Union[int, str] = sequence_length
self.embed_model: Optional[keras.Model] = None
self._tokenizer = None
@property
def token_count(self) -> int:
"""
corpus token count
"""
return len(self.processor.token2idx)
@property
def sequence_length(self) -> Union[int, str]:
"""
model sequence length
"""
return self.processor.sequence_length
@property
def label2idx(self) -> Dict[str, int]:
"""
label to index dict
"""
return self.processor.label2idx
@property
def token2idx(self) -> Dict[str, int]:
"""
token to index dict
"""
return self.processor.token2idx
@property
def tokenizer(self):
if self._tokenizer:
return self._tokenizer
else:
            raise ValueError('This embedding does not support a built-in tokenizer')
@sequence_length.setter
def sequence_length(self, val: Union[int, str]):
if isinstance(val, str):
if val == 'auto':
logging.warning("Sequence length will auto set at 95% of sequence length")
elif val == 'variable':
val = None
else:
raise ValueError("sequence_length must be an int or 'auto' or 'variable'")
self.processor.sequence_length = val
def _build_model(self, **kwargs):
raise NotImplementedError
def analyze_corpus(self,
x: List[List[str]],
y: Union[List[List[str]], List[str]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
self.processor.analyze_corpus(x, y)
if self.sequence_length == 'auto':
self.sequence_length = self.processor.dataset_info['RECOMMEND_LEN']
self._build_model()
def embed_one(self, sentence: Union[List[str], List[int]]) -> np.array:
"""
Convert one sentence to vector
Args:
sentence: target sentence, list of str
Returns:
vectorized sentence
"""
return self.embed([sentence])[0]
def embed(self,
sentence_list: Union[List[List[str]], List[List[int]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug info
Returns:
vectorized sentence list
"""
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
return self.processor.process_x_dataset(data, self.sequence_length, subset)
def process_y_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
"""
batch process labels data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
return self.processor.process_y_dataset(data, self.sequence_length, subset)
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
return self.processor.reverse_numerize_label_sequences(sequences, lengths=lengths)
def __repr__(self):
return f"<{self.__class__} seq_len: {self.sequence_length}>"
def __str__(self):
return self.__repr__()
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/base_embedding.py | base_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-26 17:40
import os
os.environ['TF_KERAS'] = '1'
import logging
from typing import Union, Optional, Any, List, Tuple
import numpy as np
import kashgari
import pathlib
from tensorflow.python.keras.utils import get_file
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
import keras_gpt_2 as gpt2
class GPT2Embedding(Embedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(GPT2Embedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
model_folder: str,
task: str = None,
sequence_length: Union[Tuple[int, ...], str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
model_folder:
sequence_length:
processor:
from_saved_model:
"""
super(GPT2Embedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
if isinstance(sequence_length, tuple):
if len(sequence_length) > 2:
                raise ValueError('GPT-2 embedding only accepts a tuple of at most 2 sequence lengths')
else:
if not all([s == sequence_length[0] for s in sequence_length]):
                    raise ValueError('GPT-2 embedding requires all sequence lengths in the tuple to be equal')
if sequence_length == 'variable':
self.sequence_length = None
self.processor.token_pad = 'pad'
self.processor.token_unk = 'unk'
self.processor.token_bos = 'pad'
self.processor.token_eos = 'pad'
self.model_folder = model_folder
if not from_saved_model:
self._build_token2idx_from_gpt()
self._build_model()
def _build_token2idx_from_gpt(self):
encoder_path = os.path.join(self.model_folder, 'encoder.json')
vocab_path = os.path.join(self.model_folder, 'vocab.bpe')
bpe: gpt2.BytePairEncoding = gpt2.get_bpe_from_files(encoder_path, vocab_path)
token2idx = bpe.token_dict.copy()
self.processor.token2idx = token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None and self.sequence_length != 'auto':
config_path = os.path.join(self.model_folder, 'hparams.json')
checkpoint_path = os.path.join(self.model_folder, 'model.ckpt')
model = gpt2.load_trained_model_from_checkpoint(config_path,
checkpoint_path,
self.sequence_length)
if not kashgari.config.disable_auto_summary:
model.summary()
self.embed_model = model
# if self.token_count == 0:
# logging.debug('need to build after build_word2idx')
# elif self.embed_model is None:
# seq_len = self.sequence_length
# if isinstance(seq_len, tuple):
# seq_len = seq_len[0]
# if isinstance(seq_len, str):
# return
# config_path = os.path.join(self.bert_path, 'bert_config.json')
# check_point_path = os.path.join(self.bert_path, 'bert_model.ckpt')
# bert_model = keras_bert.load_trained_model_from_checkpoint(config_path,
# check_point_path,
# seq_len=seq_len)
#
# self._model = tf.keras.Model(bert_model.inputs, bert_model.output)
# bert_seq_len = int(bert_model.output.shape[1])
# if bert_seq_len < seq_len:
# logging.warning(f"Sequence length limit set to {bert_seq_len} by pre-trained model")
# self.sequence_length = bert_seq_len
# self.embedding_size = int(bert_model.output.shape[-1])
# num_layers = len(bert_model.layers)
# bert_model.summary()
# target_layer_idx = [num_layers - 1 + idx * 8 for idx in range(-3, 1)]
# features_layers = [bert_model.get_layer(index=idx).output for idx in target_layer_idx]
# embedding_layer = L.concatenate(features_layers)
# output_features = NonMaskingLayer()(embedding_layer)
#
# self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
# logging.warning(f'seq_len: {self.sequence_length}')
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if len(self.processor.token2idx) == 0:
self._build_token2idx_from_gpt()
super(GPT2Embedding, self).analyze_corpus(x, y)
def embed(self,
sentence_list: Union[Tuple[List[List[str]], ...], List[List[str]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug log
Returns:
vectorized sentence list
"""
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: Union[Tuple[List[List[str]], ...], List[List[str]]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
x1 = None
if isinstance(data, tuple):
if len(data) == 2:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
x1 = self.processor.process_x_dataset(data[1], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data, self.sequence_length, subset)
if x1 is None:
x1 = np.zeros(x0.shape, dtype=np.int32)
return x0, x1
@classmethod
def load_data(cls, model_name):
"""
Download pretrained GPT-2 models
Args:
model_name: {117M, 345M}
Returns:
GPT-2 model folder
"""
model_folder: pathlib.Path = pathlib.Path(os.path.join(kashgari.macros.DATA_PATH,
'datasets',
f'gpt2-{model_name}'))
model_folder.mkdir(exist_ok=True, parents=True)
for filename in ['checkpoint', 'encoder.json', 'hparams.json', 'model.ckpt.data-00000-of-00001',
'model.ckpt.index', 'model.ckpt.meta', 'vocab.bpe']:
url = "https://storage.googleapis.com/gpt-2/models/" + model_name + "/" + filename
get_file(os.path.join(f'gpt2-{model_name}', filename),
url,
cache_dir=kashgari.macros.DATA_PATH)
return str(model_folder)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
# bert_model_path = os.path.join(utils.get_project_path(), 'tests/test-data/bert')
model_folder = GPT2Embedding.load_data('117M')
print(model_folder)
b = GPT2Embedding(task=kashgari.CLASSIFICATION,
model_folder=model_folder,
sequence_length=12)
# from kashgari.corpus import SMP2018ECDTCorpus
# test_x, test_y = SMP2018ECDTCorpus.load_data('valid')
# b.analyze_corpus(test_x, test_y)
data1 = 'all work and no play makes'.split(' ')
r = b.embed([data1], True)
print(r)
print(r.shape)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/gpt_2_embedding.py | gpt_2_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bert_embedding_v2.py
# time: 10:03 上午
import os
os.environ['TF_KERAS'] = '1'
import json
import codecs
import logging
from typing import Union, Optional
from bert4keras.models import build_transformer_model
import kashgari
import tensorflow as tf
from kashgari.embeddings.bert_embedding import BERTEmbedding
from kashgari.layers import NonMaskingLayer
from kashgari.processors.base_processor import BaseProcessor
import keras_bert
class BERTEmbeddingV2(BERTEmbedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(BERTEmbedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
vacab_path: str,
config_path: str,
checkpoint_path: str,
bert_type: str = 'bert',
task: str = None,
sequence_length: Union[str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
"""
self.model_folder = ''
self.vacab_path = vacab_path
self.config_path = config_path
self.checkpoint_path = checkpoint_path
super(BERTEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
self.bert_type = bert_type
self.processor.token_pad = '[PAD]'
self.processor.token_unk = '[UNK]'
self.processor.token_bos = '[CLS]'
self.processor.token_eos = '[SEP]'
self.processor.add_bos_eos = True
if not from_saved_model:
self._build_token2idx_from_bert()
self._build_model()
def _build_token2idx_from_bert(self):
token2idx = {}
with codecs.open(self.vacab_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
self.bert_token2idx = token2idx
self._tokenizer = keras_bert.Tokenizer(token2idx)
self.processor.token2idx = self.bert_token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None:
seq_len = self.sequence_length
if isinstance(seq_len, tuple):
seq_len = seq_len[0]
if isinstance(seq_len, str):
logging.warning(f"Model will be built when sequence length is determined")
return
config_path = self.config_path
config = json.load(open(config_path))
if seq_len > config.get('max_position_embeddings'):
seq_len = config.get('max_position_embeddings')
logging.warning(f"Max seq length is {seq_len}")
bert_model = build_transformer_model(config_path=self.config_path,
checkpoint_path=self.checkpoint_path,
model=self.bert_type,
application='encoder',
return_keras_model=True)
self.embed_model = bert_model
self.embedding_size = int(bert_model.output.shape[-1])
output_features = NonMaskingLayer()(bert_model.output)
self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
if __name__ == "__main__":
# BERT_PATH = '/Users/brikerman/Desktop/nlp/language_models/bert/chinese_L-12_H-768_A-12'
model_folder = '/Users/brikerman/Desktop/nlp/language_models/albert_base'
checkpoint_path = os.path.join(model_folder, 'model.ckpt-best')
config_path = os.path.join(model_folder, 'albert_config.json')
vacab_path = os.path.join(model_folder, 'vocab_chinese.txt')
embed = BERTEmbeddingV2(vacab_path, config_path, checkpoint_path,
bert_type='albert',
task=kashgari.CLASSIFICATION,
sequence_length=100)
x = embed.embed_one(list('今天天气不错'))
print(x)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/bert_embedding_v2.py | bert_embedding_v2.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: stacked_embedding.py
# time: 2019-05-23 09:18
import json
import pydoc
from typing import Union, Optional, Tuple, List, Dict
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
import kashgari
from kashgari.embeddings.base_embedding import Embedding
from kashgari.layers import L
from kashgari.processors.base_processor import BaseProcessor
class StackedEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
@classmethod
def _load_saved_instance(cls,
config_dict: Dict,
model_path: str,
tf_model: keras.Model):
embeddings = []
for embed_info in config_dict['embeddings']:
embed_class = pydoc.locate(f"{embed_info['module']}.{embed_info['class_name']}")
embedding: Embedding = embed_class._load_saved_instance(embed_info,
model_path,
tf_model)
embeddings.append(embedding)
instance = cls(embeddings=embeddings,
from_saved_model=True)
print('----')
print(instance.embeddings)
embed_model_json_str = json.dumps(config_dict['embed_model'])
instance.embed_model = keras.models.model_from_json(embed_model_json_str,
custom_objects=kashgari.custom_objects)
# Load Weights from model
for layer in instance.embed_model.layers:
layer.set_weights(tf_model.get_layer(layer.name).get_weights())
return instance
def info(self):
info = super(StackedEmbedding, self).info()
info['embeddings'] = [embed.info() for embed in self.embeddings]
info['config'] = {}
return info
def __init__(self,
embeddings: List[Embedding],
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
embeddings:
processor:
"""
task = kashgari.CLASSIFICATION
if all(isinstance(embed.sequence_length, int) for embed in embeddings):
sequence_length = [embed.sequence_length for embed in embeddings]
else:
raise ValueError('Need to set sequence length for all embeddings while using stacked embedding')
super(StackedEmbedding, self).__init__(task=task,
sequence_length=sequence_length[0],
embedding_size=100,
processor=processor,
from_saved_model=from_saved_model)
self.embeddings = embeddings
self.processor = embeddings[0].processor
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
if self.embed_model is None and all(embed.embed_model is not None for embed in self.embeddings):
layer_concatenate = L.Concatenate(name='layer_concatenate')
inputs = []
for embed in self.embeddings:
inputs += embed.embed_model.inputs
# inputs = [embed.embed_model.inputs for embed in self.embeddings]
outputs = layer_concatenate([embed.embed_model.output for embed in self.embeddings])
self.embed_model = tf.keras.Model(inputs, outputs)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[str]], List[str]]):
for index in range(len(x)):
self.embeddings[index].analyze_corpus(x[index], y)
self._build_model()
def process_x_dataset(self,
data: Tuple[List[List[str]], ...],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
result = []
for index, dataset in enumerate(data):
x = self.embeddings[index].process_x_dataset(dataset, subset)
if isinstance(x, tuple):
result += list(x)
else:
result.append(x)
return tuple(result)
def process_y_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> np.ndarray:
return self.embeddings[0].process_y_dataset(data, subset)
if __name__ == "__main__":
pass
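    # Hedged sketch: stack a token embedding with a numeric feature embedding.
    # Both sub-embeddings must share a fixed (int) sequence length; the toy
    # sentences, `is_bold` feature values and labels below are illustrative
    # assumptions.
    from kashgari.embeddings import BareEmbedding, NumericFeaturesEmbedding
    text = [['this', 'is', 'bold'], ['plain', 'text', 'here']]
    is_bold = [[0, 0, 1], [0, 0, 0]]
    labels = ['styled', 'plain']
    text_embedding = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=5)
    bold_embedding = NumericFeaturesEmbedding(feature_count=2,
                                              feature_name='is_bold',
                                              sequence_length=5)
    stacked = StackedEmbedding([text_embedding, bold_embedding])
    stacked.analyze_corpus((text, is_bold), labels)
    tensors = stacked.process_x_dataset((text, is_bold))
    print([t.shape for t in tensors])           # [(2, 5), (2, 5)]
    print(stacked.embed_model.output.shape)     # (?, 5, token_dim + feature_dim)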
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/stacked_embedding.py | stacked_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_embedding.py
# time: 2019-05-25 17:40
import os
os.environ['TF_KERAS'] = '1'
import codecs
import logging
from typing import Union, Optional, Any, List, Tuple
import numpy as np
import kashgari
import tensorflow as tf
from kashgari.layers import NonMaskingLayer
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
import keras_bert
class BERTEmbedding(Embedding):
"""Pre-trained BERT embedding"""
def info(self):
info = super(BERTEmbedding, self).info()
info['config'] = {
'model_folder': self.model_folder,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
model_folder: str,
layer_nums: int = 4,
trainable: bool = False,
task: str = None,
sequence_length: Union[str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
model_folder:
            layer_nums: number of hidden layers whose outputs are concatenated into a single tensor,
                        default `4`, i.e. output the last 4 hidden layers as the BERT paper suggests
            trainable: whether the model is trainable, default `False`; set it to `True`
                       to fine-tune this embedding layer during your training
sequence_length:
processor:
from_saved_model:
"""
self.trainable = trainable
        # No need to train the whole BERT model when only its feature output is used
self.training = False
self.layer_nums = layer_nums
if isinstance(sequence_length, tuple):
            raise ValueError('BERT embedding only accepts an `int` type `sequence_length`')
if sequence_length == 'variable':
            raise ValueError('BERT embedding does not support variable-length sequences; set a fixed `sequence_length`')
super(BERTEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
self.processor.token_pad = '[PAD]'
self.processor.token_unk = '[UNK]'
self.processor.token_bos = '[CLS]'
self.processor.token_eos = '[SEP]'
self.processor.add_bos_eos = True
self.model_folder = model_folder
if not from_saved_model:
self._build_token2idx_from_bert()
self._build_model()
def _build_token2idx_from_bert(self):
dict_path = os.path.join(self.model_folder, 'vocab.txt')
token2idx = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
for line in reader:
token = line.strip()
token2idx[token] = len(token2idx)
self.bert_token2idx = token2idx
self._tokenizer = keras_bert.Tokenizer(token2idx)
self.processor.token2idx = self.bert_token2idx
self.processor.idx2token = dict([(value, key) for key, value in token2idx.items()])
def _build_model(self, **kwargs):
if self.embed_model is None:
seq_len = self.sequence_length
if isinstance(seq_len, tuple):
seq_len = seq_len[0]
if isinstance(seq_len, str):
logging.warning(f"Model will be built until sequence length is determined")
return
config_path = os.path.join(self.model_folder, 'bert_config.json')
check_point_path = os.path.join(self.model_folder, 'bert_model.ckpt')
bert_model = keras_bert.load_trained_model_from_checkpoint(config_path,
check_point_path,
seq_len=seq_len,
output_layer_num=self.layer_nums,
training=self.training,
trainable=self.trainable)
self._model = tf.keras.Model(bert_model.inputs, bert_model.output)
bert_seq_len = int(bert_model.output.shape[1])
if bert_seq_len < seq_len:
logging.warning(f"Sequence length limit set to {bert_seq_len} by pre-trained model")
self.sequence_length = bert_seq_len
self.embedding_size = int(bert_model.output.shape[-1])
output_features = NonMaskingLayer()(bert_model.output)
self.embed_model = tf.keras.Model(bert_model.inputs, output_features)
logging.warning(f'seq_len: {self.sequence_length}')
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if len(self.processor.token2idx) == 0:
self._build_token2idx_from_bert()
super(BERTEmbedding, self).analyze_corpus(x, y)
def embed(self,
sentence_list: Union[Tuple[List[List[str]], ...], List[List[str]]],
debug: bool = False) -> np.ndarray:
"""
batch embed sentences
Args:
sentence_list: Sentence list to embed
debug: show debug log
Returns:
vectorized sentence list
"""
if self.embed_model is None:
            raise ValueError('need to build the model before embedding sentences')
tensor_x = self.process_x_dataset(sentence_list)
if debug:
logging.debug(f'sentence tensor: {tensor_x}')
embed_results = self.embed_model.predict(tensor_x)
return embed_results
def process_x_dataset(self,
data: Union[Tuple[List[List[str]], ...], List[List[str]]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
x1 = None
if isinstance(data, tuple):
if len(data) == 2:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
x1 = self.processor.process_x_dataset(data[1], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data[0], self.sequence_length, subset)
else:
x0 = self.processor.process_x_dataset(data, self.sequence_length, subset)
if x1 is None:
x1 = np.zeros(x0.shape, dtype=np.int32)
return x0, x1
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
# bert_model_path = os.path.join(utils.get_project_path(), 'tests/test-data/bert')
b = BERTEmbedding(task=kashgari.CLASSIFICATION,
model_folder='/Users/brikerman/.kashgari/embedding/bert/chinese_L-12_H-768_A-12',
sequence_length=12)
from kashgari.corpus import SMP2018ECDTCorpus
test_x, test_y = SMP2018ECDTCorpus.load_data('valid')
b.analyze_corpus(test_x, test_y)
data1 = 'all work and no play makes'.split(' ')
data2 = '你 好 啊'.split(' ')
r = b.embed([data1], True)
tokens = b.process_x_dataset([['语', '言', '模', '型']])[0]
target_index = [101, 6427, 6241, 3563, 1798, 102]
target_index = target_index + [0] * (12 - len(target_index))
assert list(tokens[0]) == list(target_index)
print(tokens)
print(r)
print(r.shape)
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/bert_embedding.py | bert_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: w2v_embedding.py
# time: 2019-05-20 17:32
import logging
from typing import Union, Optional, Dict, Any, List, Tuple
import numpy as np
from gensim.models import KeyedVectors
from tensorflow import keras
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
class WordEmbedding(Embedding):
"""Pre-trained word2vec embedding"""
def info(self):
info = super(WordEmbedding, self).info()
info['config'] = {
'w2v_path': self.w2v_path,
'w2v_kwargs': self.w2v_kwargs,
'sequence_length': self.sequence_length
}
return info
def __init__(self,
w2v_path: str,
task: str = None,
w2v_kwargs: Dict[str, Any] = None,
sequence_length: Union[Tuple[int, ...], str, int] = 'auto',
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Args:
task:
w2v_path: word2vec file path
w2v_kwargs: params pass to the ``load_word2vec_format()`` function of ``gensim.models.KeyedVectors`` -
https://radimrehurek.com/gensim/models/keyedvectors.html#module-gensim.models.keyedvectors
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the 95th percentile of the
                corpus sequence lengths is used as the sequence length. With ``'variable'``, the model input shape is
                set to None so it can handle inputs of various lengths, using the longest sequence in each batch as
                that batch's sequence length. With an integer such as ``50``, input and output sequences are set to
                length 50.
processor:
"""
if w2v_kwargs is None:
w2v_kwargs = {}
self.w2v_path = w2v_path
self.w2v_kwargs = w2v_kwargs
self.w2v_model_loaded = False
super(WordEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=0,
processor=processor,
from_saved_model=from_saved_model)
if not from_saved_model:
self._build_token2idx_from_w2v()
if self.sequence_length != 'auto':
self._build_model()
def _build_token2idx_from_w2v(self):
w2v = KeyedVectors.load_word2vec_format(self.w2v_path, **self.w2v_kwargs)
token2idx = {
self.processor.token_pad: 0,
self.processor.token_unk: 1,
self.processor.token_bos: 2,
self.processor.token_eos: 3
}
for token in w2v.index2word:
token2idx[token] = len(token2idx)
vector_matrix = np.zeros((len(token2idx), w2v.vector_size))
vector_matrix[1] = np.random.rand(w2v.vector_size)
vector_matrix[4:] = w2v.vectors
self.embedding_size = w2v.vector_size
self.w2v_vector_matrix = vector_matrix
self.w2v_token2idx = token2idx
self.w2v_top_words = w2v.index2entity[:50]
self.w2v_model_loaded = True
self.processor.token2idx = self.w2v_token2idx
self.processor.idx2token = dict([(value, key) for key, value in self.w2v_token2idx.items()])
logging.debug('------------------------------------------------')
logging.debug('Loaded gensim word2vec model')
logging.debug('model : {}'.format(self.w2v_path))
logging.debug('word count : {}'.format(len(self.w2v_vector_matrix)))
logging.debug('Top 50 word : {}'.format(self.w2v_top_words))
logging.debug('------------------------------------------------')
def _build_model(self, **kwargs):
if self.token_count == 0:
            logging.debug('embedding model will be built after the vocabulary (token2idx) is prepared')
else:
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input')
layer_embedding = L.Embedding(self.token_count,
self.embedding_size,
weights=[self.w2v_vector_matrix],
trainable=False,
name=f'layer_embedding')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[Any]], List[Any]]):
"""
Prepare embedding layer and pre-processor for labeling task
Args:
x:
y:
Returns:
"""
if not self.w2v_model_loaded:
self._build_token2idx_from_w2v()
super(WordEmbedding, self).analyze_corpus(x, y)
if __name__ == "__main__":
print('hello world')
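    # Hedged sketch: write a tiny word2vec file in text format on the fly so the
    # example stays self-contained, then load it through WordEmbedding. The
    # three toy vectors below are illustrative assumptions.
    import os
    import tempfile
    import kashgari
    w2v_text = '3 4\nhello 0.1 0.2 0.3 0.4\nworld 0.2 0.1 0.4 0.3\nkashgari 0.3 0.4 0.1 0.2\n'
    w2v_file = os.path.join(tempfile.mkdtemp(), 'toy_w2v.txt')
    with open(w2v_file, 'w') as f:
        f.write(w2v_text)
    embedding = WordEmbedding(w2v_path=w2v_file,
                              task=kashgari.CLASSIFICATION,
                              w2v_kwargs={'binary': False},
                              sequence_length=4)
    print(embedding.embed_one(['hello', 'world']).shape)    # (sequence_length, vector_size) -> (4, 4)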
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/word_embedding.py | word_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: bare_embedding.py
# time: 2019-05-20 10:36
import logging
from typing import Union, Optional
from tensorflow import keras
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
# Todo: A better name for this class
class BareEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
def __init__(self,
task: str = None,
sequence_length: Union[int, str] = 'auto',
embedding_size: int = 100,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
Init bare embedding (embedding without pre-training)
Args:
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the 95th percentile of the
                corpus sequence lengths is used as the sequence length. With ``'variable'``, the model input shape is
                set to None so it can handle inputs of various lengths, using the longest sequence in each batch as
                that batch's sequence length. With an integer such as ``50``, input and output sequences are set to
                length 50.
embedding_size: Dimension of the dense embedding.
"""
super(BareEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=embedding_size,
processor=processor,
from_saved_model=from_saved_model)
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
if self.sequence_length == 0 or \
self.sequence_length == 'auto' or \
self.token_count == 0:
            logging.debug('embedding model will be built after the vocabulary (token2idx) is prepared')
else:
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input')
layer_embedding = L.Embedding(self.token_count,
self.embedding_size,
name=f'layer_embedding')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
if __name__ == "__main__":
print('hello world')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/bare_embedding.py | bare_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: numeric_feature_embedding.py
# time: 2019-05-23 09:04
from typing import Union, Optional, Tuple, List
import numpy as np
from tensorflow import keras
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
import kashgari
from kashgari.embeddings.base_embedding import Embedding
from kashgari.processors.base_processor import BaseProcessor
L = keras.layers
# Todo: A better name for this class
class NumericFeaturesEmbedding(Embedding):
"""Embedding layer without pre-training, train embedding layer while training model"""
def info(self):
info = super(NumericFeaturesEmbedding, self).info()
info['config'] = {
'feature_count': self.feature_count,
'feature_name': self.feature_name,
'sequence_length': self.sequence_length,
'embedding_size': self.embedding_size
}
return info
def __init__(self,
feature_count: int,
feature_name: str,
sequence_length: Union[str, int] = 'auto',
embedding_size: int = None,
processor: Optional[BaseProcessor] = None,
from_saved_model: bool = False):
"""
        Init numeric features embedding (an embedding for discrete numeric feature sequences,
        trained while training the model)
        Args:
            feature_count: number of distinct values this feature can take (its vocabulary size).
            feature_name: name of the feature, used to name the input and embedding layers.
            sequence_length: ``'auto'``, ``'variable'`` or an integer. With ``'auto'``, the 95th percentile of the
                corpus sequence lengths is used as the sequence length. With ``'variable'``, the model input shape is
                set to None so it can handle inputs of various lengths, using the longest sequence in each batch as
                that batch's sequence length. With an integer such as ``50``, input and output sequences are set to
                length 50.
embedding_size: Dimension of the dense embedding.
"""
# Dummy Type
task = kashgari.CLASSIFICATION
if embedding_size is None:
embedding_size = feature_count * 8
super(NumericFeaturesEmbedding, self).__init__(task=task,
sequence_length=sequence_length,
embedding_size=embedding_size,
processor=processor,
from_saved_model=from_saved_model)
self.feature_count = feature_count
self.feature_name = feature_name
if not from_saved_model:
self._build_model()
def _build_model(self, **kwargs):
input_tensor = L.Input(shape=(self.sequence_length,),
name=f'input_{self.feature_name}')
layer_embedding = L.Embedding(self.feature_count + 1,
self.embedding_size,
name=f'layer_embedding_{self.feature_name}')
embedded_tensor = layer_embedding(input_tensor)
self.embed_model = keras.Model(input_tensor, embedded_tensor)
def analyze_corpus(self,
x: Union[Tuple[List[List[str]], ...], List[List[str]]],
y: Union[List[List[str]], List[str]]):
pass
def process_x_dataset(self,
data: List[List[str]],
subset: Optional[List[int]] = None) -> Tuple[np.ndarray, ...]:
"""
batch process feature data while training
Args:
data: target dataset
subset: subset index list
Returns:
vectorized feature tensor
"""
if subset is not None:
numerized_samples = kashgari.utils.get_list_subset(data, subset)
else:
numerized_samples = data
return pad_sequences(numerized_samples, self.sequence_length, padding='post', truncating='post')
if __name__ == "__main__":
e = NumericFeaturesEmbedding(2, feature_name='is_bold', sequence_length=10)
e.embed_model.summary()
print(e.embed_one([1, 2]))
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/numeric_feature_embedding.py | numeric_feature_embedding.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py.py
# time: 2019-05-20 11:21
from kashgari.embeddings.bare_embedding import BareEmbedding
from kashgari.embeddings.bert_embedding import BERTEmbedding
from kashgari.embeddings.word_embedding import WordEmbedding
from kashgari.embeddings.numeric_feature_embedding import NumericFeaturesEmbedding
from kashgari.embeddings.stacked_embedding import StackedEmbedding
from kashgari.embeddings.gpt_2_embedding import GPT2Embedding
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/embeddings/__init__.py | __init__.py |
# encoding: utf-8
# author: AlexWang
# contact: ialexwwang@gmail.com
# file: attention_weighted_average.py
# time: 2019-06-25 16:35
import kashgari
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
L = keras.layers
InputSpec = L.InputSpec
class KMaxPoolingLayer(L.Layer):
'''
K-max pooling layer that extracts the k-highest activation from a sequence (2nd dimension).
TensorFlow backend.
# Arguments
k: An int scale,
indicate k max steps of features to pool.
sorted: A bool,
if output is sorted (default) or not.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, steps, features)` while `channels_first`
corresponds to inputs with shape
`(batch, features, steps)`.
# Input shape
- If `data_format='channels_last'`:
3D tensor with shape:
`(batch_size, steps, features)`
- If `data_format='channels_first'`:
3D tensor with shape:
`(batch_size, features, steps)`
# Output shape
3D tensor with shape:
`(batch_size, top-k-steps, features)`
'''
def __init__(self, k=1, sorted=True, data_format='channels_last', **kwargs): # noqa: A002
super(KMaxPoolingLayer, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.k = k
self.sorted = sorted
if data_format.lower() in ['channels_first', 'channels_last']:
self.data_format = data_format.lower()
else:
self.data_format = K.image_data_format()
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
return (input_shape[0], self.k, input_shape[1])
else:
return (input_shape[0], self.k, input_shape[2])
def call(self, inputs):
if self.data_format == 'channels_last':
# swap last two dimensions since top_k will be applied along the last dimension
shifted_input = tf.transpose(inputs, [0, 2, 1])
# extract top_k, returns two tensors [values, indices]
top_k = tf.nn.top_k(shifted_input, k=self.k, sorted=self.sorted)[0]
else:
top_k = tf.nn.top_k(inputs, k=self.k, sorted=self.sorted)[0]
# return flattened output
return tf.transpose(top_k, [0, 2, 1])
def get_config(self):
config = {'k': self.k,
'sorted': self.sorted,
'data_format': self.data_format}
base_config = super(KMaxPoolingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
KMaxPooling = KMaxPoolingLayer
KMaxPoolLayer = KMaxPoolingLayer
kashgari.custom_objects['KMaxPoolingLayer'] = KMaxPoolingLayer
kashgari.custom_objects['KMaxPooling'] = KMaxPooling
kashgari.custom_objects['KMaxPoolLayer'] = KMaxPoolLayer
if __name__ == '__main__':
print('Hello world, KMaxPoolLayer/KMaxPoolingLayer.')
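    # A minimal usage sketch (hedged demo, not part of the original API surface):
    # pool the top-2 timesteps of a random (batch, steps, features) tensor.
    # Assumes the TF 1.x / tensorflow.python.keras stack this repo is written against.
    import numpy as np
    demo_input = L.Input(shape=(6, 4))
    demo_output = KMaxPoolingLayer(k=2)(demo_input)
    demo_model = keras.Model(demo_input, demo_output)
    print(demo_model.predict(np.random.rand(3, 6, 4)).shape)  # expected: (3, 2, 4)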
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/layers/kmax_pool_layer.py | kmax_pool_layer.py |
# encoding: utf-8
# author: AlexWang
# contact: ialexwwang@gmail.com
# file: att_wgt_avg_layer.py
# time: 2019-06-24 19:35
from tensorflow.python import keras
from tensorflow.python.keras import backend as K
import kashgari
L = keras.layers
initializers = keras.initializers
InputSpec = L.InputSpec
class AttentionWeightedAverageLayer(L.Layer):
'''
Computes a weighted average of the different channels across timesteps.
    Uses one parameter per channel to compute the attention value for a single timestep.
'''
def __init__(self, return_attention=False, **kwargs):
self.init = initializers.get('uniform')
self.supports_masking = True
self.return_attention = return_attention
super(AttentionWeightedAverageLayer, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(ndim=3)]
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[2].value, 1),
name='{}_w'.format(self.name),
initializer=self.init,
trainable=True
)
# self.trainable_weights = [self.W]
super(AttentionWeightedAverageLayer, self).build(input_shape)
def call(self, x, mask=None):
# computes a probability distribution over the timesteps
# uses 'max trick' for numerical stability
# reshape is done to avoid issue with Tensorflow
# and 1-dimensional weights
logits = K.dot(x, self.W)
x_shape = K.shape(x)
logits = K.reshape(logits, (x_shape[0], x_shape[1]))
ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
# masked timesteps have zero weight
if mask is not None:
mask = K.cast(mask, K.floatx())
ai = ai * mask
att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
weighted_input = x * K.expand_dims(att_weights)
result = K.sum(weighted_input, axis=1)
if self.return_attention:
return [result, att_weights]
return result
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
output_len = input_shape[2]
if self.return_attention:
return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
return (input_shape[0], output_len)
def compute_mask(self, inputs, input_mask=None):
if isinstance(input_mask, list):
return [None] * len(input_mask)
else:
return None
def get_config(self):
config = {'return_attention': self.return_attention, }
base_config = super(AttentionWeightedAverageLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
AttentionWeightedAverage = AttentionWeightedAverageLayer
AttWgtAvgLayer = AttentionWeightedAverageLayer
kashgari.custom_objects['AttentionWeightedAverageLayer'] = AttentionWeightedAverageLayer
kashgari.custom_objects['AttentionWeightedAverage'] = AttentionWeightedAverage
kashgari.custom_objects['AttWgtAvgLayer'] = AttWgtAvgLayer
if __name__ == '__main__':
print('Hello world, AttentionWeightedAverageLayer/AttWgtAvgLayer.')
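    # A minimal usage sketch (hedged demo; assumes the TF 1.x / tensorflow.python.keras
    # stack this repo targets): attention-pool a (batch, steps, features) tensor
    # down to a (batch, features) summary vector.
    import numpy as np
    demo_input = L.Input(shape=(6, 4))
    demo_output = AttWgtAvgLayer()(demo_input)
    demo_model = keras.Model(demo_input, demo_output)
    print(demo_model.predict(np.random.rand(2, 6, 4)).shape)  # expected: (2, 4)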
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/layers/att_wgt_avg_layer.py | att_wgt_avg_layer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: crf.py
# time: 2019-06-28 14:33
import tensorflow as tf
class CRF(tf.keras.layers.Layer):
"""
Conditional Random Field layer (tf.keras)
`CRF` can be used as the last layer in a network (as a classifier). Input shape (features)
must be equal to the number of classes the CRF can predict (a linear layer is recommended).
    Note: networks using `CRF` must use the loss and accuracy functions provided by this
    layer (`loss` and `viterbi_accuracy`), since decoding the sequence depends on the
    layer's internal transition weights.
Args:
output_dim (int): the number of labels to tag each temporal input.
Input shape:
nD tensor with shape `(batch_size, sentence length, num_classes)`.
Output shape:
nD tensor with shape: `(batch_size, sentence length, num_classes)`.
"""
def __init__(self,
output_dim,
mode='reg',
supports_masking=False,
transitions=None,
**kwargs):
self.transitions = None
super(CRF, self).__init__(**kwargs)
self.output_dim = int(output_dim)
self.mode = mode
if self.mode == 'pad':
self.input_spec = [tf.keras.layers.InputSpec(min_ndim=3), tf.keras.layers.InputSpec(min_ndim=2)]
elif self.mode == 'reg':
self.input_spec = tf.keras.layers.InputSpec(min_ndim=3)
else:
raise ValueError
self.supports_masking = supports_masking
self.sequence_lengths = None
def get_config(self):
config = {
'output_dim': self.output_dim,
'mode': self.mode,
'supports_masking': self.supports_masking,
'transitions': tf.keras.backend.eval(self.transitions)
}
base_config = super(CRF, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
if self.mode == 'pad':
assert len(input_shape) == 2
assert len(input_shape[0]) == 3
assert len(input_shape[1]) == 2
f_shape = tf.TensorShape(input_shape[0])
input_spec = [tf.keras.layers.InputSpec(min_ndim=3, axes={-1: f_shape[-1]}),
tf.keras.layers.InputSpec(min_ndim=2, axes={-1: 1}, dtype=tf.int32)]
else:
assert len(input_shape) == 3
f_shape = tf.TensorShape(input_shape)
input_spec = tf.keras.layers.InputSpec(min_ndim=3, axes={-1: f_shape[-1]})
if f_shape[-1] is None:
raise ValueError('The last dimension of the inputs to `CRF` should be defined. Found `None`.')
if f_shape[-1] != self.output_dim:
raise ValueError('The last dimension of the input shape must be equal to output shape. '
'Use a linear layer if needed.')
self.input_spec = input_spec
self.transitions = self.add_weight(name='transitions',
shape=[self.output_dim, self.output_dim],
initializer='glorot_uniform',
trainable=True)
self.built = True
def call(self, inputs, **kwargs):
if self.mode == 'pad':
sequences = tf.convert_to_tensor(inputs[0], dtype=self.dtype)
self.sequence_lengths = tf.keras.backend.flatten(inputs[-1])
else:
sequences = tf.convert_to_tensor(inputs, dtype=self.dtype)
shape = tf.shape(inputs)
self.sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
viterbi_sequence, _ = tf.contrib.crf.crf_decode(sequences, self.transitions,
self.sequence_lengths)
output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
return tf.keras.backend.in_train_phase(sequences, output)
def loss(self, y_true, y_pred):
y_pred = tf.convert_to_tensor(y_pred, dtype=self.dtype)
log_likelihood, self.transitions = tf.contrib.crf.crf_log_likelihood(y_pred,
tf.cast(tf.keras.backend.argmax(y_true),
dtype=tf.int32),
self.sequence_lengths,
transition_params=self.transitions)
return tf.reduce_mean(-log_likelihood)
def compute_output_shape(self, input_shape):
if self.mode == 'pad':
data_shape = input_shape[0]
else:
data_shape = input_shape
tf.TensorShape(data_shape).assert_has_rank(3)
return data_shape[:2] + (self.output_dim,)
@property
def viterbi_accuracy(self):
def accuracy(y_true, y_pred):
shape = tf.shape(y_pred)
sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * (shape[1])
viterbi_sequence, _ = tf.contrib.crf.crf_decode(y_pred, self.transitions, sequence_lengths)
output = tf.keras.backend.one_hot(viterbi_sequence, self.output_dim)
return tf.keras.metrics.categorical_accuracy(y_true, output)
        accuracy.__name__ = 'viterbi_accuracy'  # Python 3 functions expose __name__, not func_name
return accuracy
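if __name__ == "__main__":
    # A minimal usage sketch (hedged; assumes a TF 1.x build where tf.contrib.crf is
    # available, which this layer depends on): a toy tagger that ends with the CRF
    # layer. As the class docstring notes, the layer's own `loss` and
    # `viterbi_accuracy` must be used when compiling.
    num_classes = 5
    demo_inputs = tf.keras.layers.Input(shape=(10,))
    features = tf.keras.layers.Embedding(100, 16)(demo_inputs)
    features = tf.keras.layers.Dense(num_classes)(features)
    crf = CRF(num_classes)
    demo_outputs = crf(features)
    demo_model = tf.keras.Model(demo_inputs, demo_outputs)
    demo_model.compile(optimizer='adam', loss=crf.loss, metrics=[crf.viterbi_accuracy])
    demo_model.summary()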
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/layers/crf.py | crf.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: non_masking_layer.py
# time: 2019-05-23 14:05
import kashgari
from tensorflow.python.keras.layers import Layer
class NonMaskingLayer(Layer):
"""
    Fix for Conv1D layers, which can't receive masked input; detail: https://github.com/keras-team/keras/issues/4978
    Thanks to https://github.com/jacoxu
"""
def __init__(self, **kwargs):
self.supports_masking = True
super(NonMaskingLayer, self).__init__(**kwargs)
def build(self, input_shape):
pass
def compute_mask(self, inputs, input_mask=None):
# do not pass the mask to the next layers
return None
def call(self, x, mask=None):
return x
kashgari.custom_objects['NonMaskingLayer'] = NonMaskingLayer
if __name__ == "__main__":
print("Hello world")
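    # A minimal usage sketch (hedged demo): drop the mask produced by a mask_zero
    # Embedding so a downstream Conv1D layer can consume the output, which is the
    # issue referenced in the class docstring.
    from tensorflow.python import keras
    demo_input = keras.layers.Input(shape=(10,))
    features = keras.layers.Embedding(100, 8, mask_zero=True)(demo_input)
    features = NonMaskingLayer()(features)
    features = keras.layers.Conv1D(filters=4, kernel_size=3)(features)
    keras.Model(demo_input, features).summary()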
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/layers/non_masking_layer.py | non_masking_layer.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-23 14:05
import tensorflow as tf
from tensorflow.python import keras
from kashgari.layers.non_masking_layer import NonMaskingLayer
from kashgari.layers.att_wgt_avg_layer import AttentionWeightedAverageLayer
from kashgari.layers.att_wgt_avg_layer import AttentionWeightedAverage, AttWgtAvgLayer
from kashgari.layers.kmax_pool_layer import KMaxPoolingLayer, KMaxPoolLayer, KMaxPooling
L = keras.layers
if __name__ == "__main__":
print("Hello world")
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/layers/__init__.py | __init__.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: scoring_processor.py
# time: 11:10 上午
from typing import List, Optional
import numpy as np
import kashgari
from kashgari import utils
from kashgari.processors.base_processor import BaseProcessor
def is_numeric(obj):
attrs = ['__add__', '__sub__', '__mul__', '__truediv__', '__pow__']
return all(hasattr(obj, attr) for attr in attrs)
class ScoringProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def __init__(self, output_dim=None, **kwargs):
super(ScoringProcessor, self).__init__(**kwargs)
self.output_dim = output_dim
def info(self):
info = super(ScoringProcessor, self).info()
info['task'] = kashgari.SCORING
return info
def _build_label_dict(self,
label_list: List[List[float]]):
"""
Build label2idx dict for sequence labeling task
Args:
label_list: corpus label list
"""
if self.output_dim is None:
label_sample = label_list[0]
if isinstance(label_sample, np.ndarray) and len(label_sample.shape) == 1:
self.output_dim = label_sample.shape[0]
elif is_numeric(label_sample):
self.output_dim = 1
elif isinstance(label_sample, list):
self.output_dim = len(label_sample)
else:
raise ValueError('Scoring Label Sample must be a float, float array or 1D numpy array')
# np_labels = np.array(label_list)
# if np_labels.max() > 1 or np_labels.min() < 0:
# raise ValueError('Scoring Label Sample must be in range[0,1]')
def process_y_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data[:]
y = np.array(target)
return y
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
return sequences
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
return sequences
if __name__ == "__main__":
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
x = x[:3]
y = [0.2, 0.3, 0.2]
p = ScoringProcessor()
p.analyze_corpus(x, y)
print(p.process_y_dataset(y))
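    # Follow-up sketch (the sequence length of 10 is an arbitrary assumption): the
    # token side of the same toy corpus as padded index arrays.
    print(p.process_x_dataset(x, max_len=10).shape)  # expected: (3, 10)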
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/processors/scoring_processor.py | scoring_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: base_processor.py
# time: 2019-05-21 11:27
import collections
import logging
import operator
from typing import List, Optional, Union, Dict, Any
import numpy as np
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from kashgari import utils
class BaseProcessor(object):
"""
Corpus Pre Processor class
"""
def __init__(self, **kwargs):
self.token2idx: Dict[str, int] = kwargs.get('token2idx', {})
self.idx2token: Dict[int, str] = dict([(v, k) for (k, v) in self.token2idx.items()])
self.token2count: Dict = {}
self.label2idx: Dict[str, int] = kwargs.get('label2idx', {})
self.idx2label: Dict[int, str] = dict([(v, k) for (k, v) in self.label2idx.items()])
self.token_pad: str = kwargs.get('token_pad', '<PAD>')
self.token_unk: str = kwargs.get('token_unk', '<UNK>')
self.token_bos: str = kwargs.get('token_bos', '<BOS>')
self.token_eos: str = kwargs.get('token_eos', '<EOS>')
self.dataset_info: Dict[str, Any] = kwargs.get('dataset_info', {})
self.add_bos_eos: bool = kwargs.get('add_bos_eos', False)
self.sequence_length = kwargs.get('sequence_length', None)
self.min_count = kwargs.get('min_count', 3)
def info(self):
return {
'class_name': self.__class__.__name__,
'config': {
'label2idx': self.label2idx,
'token2idx': self.token2idx,
'token_pad': self.token_pad,
'token_unk': self.token_unk,
'token_bos': self.token_bos,
'token_eos': self.token_eos,
'dataset_info': self.dataset_info,
'add_bos_eos': self.add_bos_eos,
'sequence_length': self.sequence_length
},
'module': self.__class__.__module__,
}
def analyze_corpus(self,
corpus: Union[List[List[str]]],
labels: Union[List[List[str]], List[str]],
force: bool = False):
rec_len = sorted([len(seq) for seq in corpus])[int(0.95 * len(corpus))]
self.dataset_info['RECOMMEND_LEN'] = rec_len
if len(self.token2idx) == 0 or force:
self._build_token_dict(corpus, self.min_count)
if len(self.label2idx) == 0 or force:
self._build_label_dict(labels)
def _build_token_dict(self, corpus: List[List[str]], min_count: int = 3):
"""
Build token index dictionary using corpus
Args:
corpus: List of tokenized sentences, like ``[['I', 'love', 'tf'], ...]``
min_count:
"""
token2idx = {
self.token_pad: 0,
self.token_unk: 1,
self.token_bos: 2,
self.token_eos: 3
}
token2count = {}
for sentence in corpus:
for token in sentence:
count = token2count.get(token, 0)
token2count[token] = count + 1
self.token2count = token2count
        # sort tokens by frequency in descending order
sorted_token2count = sorted(token2count.items(),
key=operator.itemgetter(1),
reverse=True)
token2count = collections.OrderedDict(sorted_token2count)
for token, token_count in token2count.items():
if token not in token2idx and token_count >= min_count:
token2idx[token] = len(token2idx)
self.token2idx = token2idx
self.idx2token = dict([(value, key)
for key, value in self.token2idx.items()])
logging.debug(f"build token2idx dict finished, contains {len(self.token2idx)} tokens.")
self.dataset_info['token_count'] = len(self.token2idx)
def _build_label_dict(self, corpus: Union[List[List[str]], List[str]]):
raise NotImplementedError
def process_x_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if max_len is None:
max_len = self.sequence_length
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data
numerized_samples = self.numerize_token_sequences(target)
return pad_sequences(numerized_samples, max_len, padding='post', truncating='post')
def process_y_dataset(self,
data: Union[List[List[str]], List[str]],
max_len: Optional[int],
subset: Optional[List[int]] = None) -> np.ndarray:
raise NotImplementedError
def numerize_token_sequences(self,
sequences: List[List[str]]):
raise NotImplementedError
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
raise NotImplementedError
def reverse_numerize_label_sequences(self, sequence, **kwargs):
raise NotImplementedError
def __repr__(self):
return f"<{self.__class__}>"
def __str__(self):
return self.__repr__()
if __name__ == "__main__":
print("Hello world")
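    # A minimal sketch of the token-dictionary construction on a toy corpus. The label
    # dictionary step is skipped because _build_label_dict is abstract in this class.
    p = BaseProcessor(min_count=1)
    p._build_token_dict([['I', 'love', 'tf'], ['I', 'love', 'keras']], min_count=1)
    print(p.token2idx)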
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/processors/base_processor.py | base_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# version: 1.0
# license: Apache Licence
# file: corpus.py
# time: 2019-05-17 11:28
import collections
import logging
import operator
from typing import List, Dict, Optional
import numpy as np
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.utils import to_categorical
import kashgari
from kashgari import utils
from kashgari.processors.base_processor import BaseProcessor
class LabelingProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def info(self):
info = super(LabelingProcessor, self).info()
info['task'] = kashgari.LABELING
return info
def _build_label_dict(self,
label_list: List[List[str]]):
"""
Build label2idx dict for sequence labeling task
Args:
label_list: corpus label list
"""
        label2idx: Dict[str, int] = {
self.token_pad: 0
}
token2count = {}
for sequence in label_list:
for label in sequence:
count = token2count.get(label, 0)
token2count[label] = count + 1
sorted_token2count = sorted(token2count.items(),
key=operator.itemgetter(1),
reverse=True)
token2count = collections.OrderedDict(sorted_token2count)
for token in token2count.keys():
if token not in label2idx:
label2idx[token] = len(label2idx)
self.label2idx = label2idx
self.idx2label = dict([(value, key)
for key, value in self.label2idx.items()])
logging.debug(f"build label2idx dict finished, contains {len(self.label2idx)} labels.")
def process_y_dataset(self,
data: List[List[str]],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data[:]
numerized_samples = self.numerize_label_sequences(target)
padded_seq = pad_sequences(
numerized_samples, max_len, padding='post', truncating='post')
return to_categorical(padded_seq, len(self.label2idx))
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[List[str]]) -> List[List[int]]:
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_pad] + seq + [self.token_pad]
result.append([self.label2idx[label] for label in seq])
return result
def reverse_numerize_label_sequences(self,
sequences,
lengths=None):
result = []
for index, seq in enumerate(sequences):
labels = []
if self.add_bos_eos:
seq = seq[1:]
for idx in seq:
labels.append(self.idx2label[idx])
if lengths is not None:
labels = labels[:lengths[index]]
result.append(labels)
return result
if __name__ == "__main__":
from kashgari.corpus import ChineseDailyNerCorpus
x, y = ChineseDailyNerCorpus.load_data()
p = LabelingProcessor()
p.analyze_corpus(x, y)
r = p.process_x_dataset(x, subset=[10, 12, 20])
print(r)
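    # Follow-up sketch (max_len of 20 is an arbitrary assumption): the matching
    # one-hot label tensor for the same subset of samples.
    print(p.process_y_dataset(y, max_len=20, subset=[10, 12, 20]).shape)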
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/processors/labeling_processor.py | labeling_processor.py |
# encoding: utf-8
# author: BrikerMan
# contact: eliyar917@gmail.com
# blog: https://eliyar.biz
# file: __init__.py
# time: 2019-05-20 10:54
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.processors.labeling_processor import LabelingProcessor
from kashgari.processors.scoring_processor import ScoringProcessor
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/processors/__init__.py | __init__.py |
from typing import List, Optional
import numpy as np
from tensorflow.python.keras.utils import to_categorical
import kashgari
from kashgari import utils
from kashgari.processors.base_processor import BaseProcessor
from sklearn.preprocessing import MultiLabelBinarizer
class ClassificationProcessor(BaseProcessor):
"""
Corpus Pre Processor class
"""
def __init__(self, multi_label=False, **kwargs):
super(ClassificationProcessor, self).__init__(**kwargs)
self.multi_label = multi_label
if self.label2idx:
self.multi_label_binarizer: MultiLabelBinarizer = MultiLabelBinarizer(classes=list(self.label2idx.keys()))
self.multi_label_binarizer.fit([])
else:
self.multi_label_binarizer: MultiLabelBinarizer = None
def info(self):
info = super(ClassificationProcessor, self).info()
info['task'] = kashgari.CLASSIFICATION
info['config']['multi_label'] = self.multi_label
return info
def _build_label_dict(self,
labels: List[str]):
if self.multi_label:
label_set = set()
for i in labels:
label_set = label_set.union(list(i))
else:
label_set = set(labels)
self.label2idx = {}
for idx, label in enumerate(sorted(label_set)):
self.label2idx[label] = len(self.label2idx)
self.idx2label = dict([(value, key) for key, value in self.label2idx.items()])
self.dataset_info['label_count'] = len(self.label2idx)
self.multi_label_binarizer = MultiLabelBinarizer(classes=list(self.label2idx.keys()))
def process_y_dataset(self,
data: List[str],
max_len: Optional[int] = None,
subset: Optional[List[int]] = None) -> np.ndarray:
if subset is not None:
target = utils.get_list_subset(data, subset)
else:
target = data
if self.multi_label:
return self.multi_label_binarizer.fit_transform(target)
else:
numerized_samples = self.numerize_label_sequences(target)
return to_categorical(numerized_samples, len(self.label2idx))
def numerize_token_sequences(self,
sequences: List[List[str]]):
result = []
for seq in sequences:
if self.add_bos_eos:
seq = [self.token_bos] + seq + [self.token_eos]
unk_index = self.token2idx[self.token_unk]
result.append([self.token2idx.get(token, unk_index) for token in seq])
return result
def numerize_label_sequences(self,
sequences: List[str]) -> List[int]:
"""
Convert label sequence to label-index sequence
``['O', 'O', 'B-ORG'] -> [0, 0, 2]``
Args:
sequences: label sequence, list of str
Returns:
label-index sequence, list of int
"""
return [self.label2idx[label] for label in sequences]
def reverse_numerize_label_sequences(self, sequences, **kwargs):
if self.multi_label:
return self.multi_label_binarizer.inverse_transform(sequences)
else:
return [self.idx2label[label] for label in sequences]
if __name__ == "__main__":
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
p = ClassificationProcessor()
p.analyze_corpus(x, y)
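    # Follow-up sketch (the sequence_length of 15 is an arbitrary assumption): turn the
    # analyzed corpus into padded token-id arrays and one-hot label targets.
    p.sequence_length = 15
    print(p.process_x_dataset(x[:2]).shape)  # expected: (2, 15)
    print(p.process_y_dataset(y[:2]).shape)  # expected: (2, number_of_labels)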
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/kashgari/processors/classification_processor.py | classification_processor.py |
# -*- coding: utf-8 -*-
# @Time : 2020/11/6-14:52
# @Author : 贾志凯 15716539228@163.com
# @File : __init__.py.py
# @Software: win10 python3.6 PyCharm
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/tests/__init__.py | __init__.py |
from bert.graph import import_tf
from bert import modeling
from bert import tokenization
from bert.graph import optimize_graph
from bert import args
from queue import Queue
from threading import Thread
tf = import_tf(0, True)
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
class BertVector:
def __init__(self, batch_size=32, pooling_strategy="REDUCE_MEAN", max_seq_len=40):
"""
init BertVector
        :param batch_size: batch size; choose it according to your memory. Default is 32.
"""
self.max_seq_length = max_seq_len
self.layer_indexes = args.layer_indexes
self.gpu_memory_fraction = 1
if pooling_strategy == "NONE":
pooling_strategy = args.PoolingStrategy.NONE
elif pooling_strategy == "REDUCE_MAX":
pooling_strategy = args.PoolingStrategy.REDUCE_MAX
elif pooling_strategy == "REDUCE_MEAN":
pooling_strategy = args.PoolingStrategy.REDUCE_MEAN
elif pooling_strategy == "REDUCE_MEAN_MAX":
pooling_strategy = args.PoolingStrategy.REDUCE_MEAN_MAX
self.graph_path = optimize_graph(pooling_strategy=pooling_strategy, max_seq_len=self.max_seq_length)
self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
self.batch_size = batch_size
self.estimator = self.get_estimator()
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
self.predict_thread.start()
def get_estimator(self):
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.model_fn import EstimatorSpec
def model_fn(features, labels, mode, params):
with tf.gfile.GFile(self.graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
input_names = ['input_ids', 'input_mask', 'input_type_ids']
output = tf.import_graph_def(graph_def,
input_map={k + ':0': features[k] for k in input_names},
return_elements=['final_encodes:0'])
return EstimatorSpec(mode=mode, predictions={
'encodes': output[0]
})
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
config.log_device_placement = False
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
params={'batch_size': self.batch_size})
def predict_from_queue(self):
prediction = self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False)
for i in prediction:
self.output_queue.put(i)
def encode(self, sentence):
self.input_queue.put(sentence)
prediction = self.output_queue.get()
return prediction
def queue_predict_input_fn(self):
return (tf.data.Dataset.from_generator(
self.generate_from_queue,
output_types={'unique_ids': tf.int32,
'input_ids': tf.int32,
'input_mask': tf.int32,
'input_type_ids': tf.int32},
output_shapes={
'unique_ids': (1,),
'input_ids': (None, self.max_seq_length),
'input_mask': (None, self.max_seq_length),
'input_type_ids': (None, self.max_seq_length)}))
def generate_from_queue(self):
while True:
features = list(self.convert_examples_to_features(seq_length=self.max_seq_length, tokenizer=self.tokenizer))
yield {
'unique_ids': [f.unique_id for f in features],
'input_ids': [f.input_ids for f in features],
'input_mask': [f.input_mask for f in features],
'input_type_ids': [f.input_type_ids for f in features]
}
def input_fn_builder(self, features, seq_length):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_input_type_ids = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_input_type_ids.append(feature.input_type_ids)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_type_ids":
tf.constant(
all_input_type_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn
def model_fn_builder(self, bert_config, init_checkpoint, layer_indexes):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
input_type_ids = features["input_type_ids"]
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
with jit_scope():
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids)
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
init_checkpoint)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {
"unique_id": unique_ids,
}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
from tensorflow.python.estimator.model_fn import EstimatorSpec
output_spec = EstimatorSpec(mode=mode, predictions=predictions)
return output_spec
return model_fn
def convert_examples_to_features(self, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
input_masks = []
examples = self._to_example(self.input_queue.get())
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
            # if the sentence's length is more than seq_length, only use the sentence's left part
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
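            #
            # A small concrete illustration (a hedged, made-up example) for the single
            # sequence "the dog barks" with seq_length = 8:
            #   tokens:          [CLS] the dog barks [SEP]
            #   input_ids:       id([CLS]) id(the) id(dog) id(barks) id([SEP]) 0 0 0
            #   input_mask:      1 1 1 1 1 0 0 0
            #   input_type_ids:  0 0 0 0 0 0 0 0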
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
            # Where "input_ids" are the tokens' indices in the vocabulary
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
input_masks.append(input_mask)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (example.unique_id))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
yield InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
@staticmethod
def _to_example(sentences):
        """
        sentences to InputExample
        :param sentences: list of strings
        :return: list of InputExample
        """
        import re
unique_id = 0
for ss in sentences:
line = tokenization.convert_to_unicode(ss)
if not line:
continue
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)
unique_id += 1
if __name__ == "__main__":
import time
bert = BertVector()
while True:
question = input('question: ')
start = time.time()
vectors = bert.encode([question])
print(str(vectors))
#print(f'predict time:----------{time.time() - start}')
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/bert/extract_feature.py | extract_feature.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
# Implements linear decay of the learning rate.
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = (
(1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
optimizer = AdamWeightDecayOptimizer(
learning_rate=learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)
# This is how the model was pre-trained.
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=global_step)
new_global_step = global_step + 1
train_op = tf.group(train_op, [global_step.assign(new_global_step)])
return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None,
name="AdamWeightDecayOptimizer"):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(
name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(
name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (
tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (
tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
      # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
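if __name__ == "__main__":
  # A minimal usage sketch (hedged; assumes TF 1.x graph mode, which this file
  # targets): fit a single scalar weight with the warmup + linear-decay AdamW schedule.
  demo_w = tf.get_variable("demo_w", shape=[], initializer=tf.zeros_initializer())
  demo_loss = tf.square(demo_w - 1.0)
  demo_train_op = create_optimizer(demo_loss, init_lr=5e-5, num_train_steps=100,
                                   num_warmup_steps=10, use_tpu=False)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
      sess.run(demo_train_op)
    print("demo_w after 3 steps:", sess.run(demo_w))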
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/bert/optimization.py | optimization.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
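if __name__ == "__main__":
  # A minimal usage sketch with a tiny in-memory vocab (an assumption for the demo;
  # no vocab file on disk is needed for these two sub-tokenizers).
  basic = BasicTokenizer(do_lower_case=True)
  print(basic.tokenize(u"Hello, BERT! 你好"))  # punctuation split, CJK chars spaced out
  toy_vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
  wordpiece = WordpieceTokenizer(vocab=toy_vocab)
  print(wordpiece.tokenize("unaffable"))  # expected: ['un', '##aff', '##able']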
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/bert/tokenization.py | tokenization.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
  """BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=True,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
      is_training: bool. True for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings. On the TPU,
        it is much faster if this is True, on the CPU or GPU, it is faster if
this is False.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
return input_tensor * cdf
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
  # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better
for TPUs.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
  # don't actually care if we attend *from* padding tokens (only *to* padding
  # tokens), so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
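# Illustrative sketch (not part of the original BERT code): the mask produced
# for a toy batch with one padded position; the input values are made up.
def _example_attention_mask():
  input_ids = tf.constant([[5, 7, 9, 0]])   # [batch_size=1, seq_length=4]
  input_mask = tf.constant([[1, 1, 1, 0]])  # last position is padding
  # Result shape is [1, 4, 4]; every row equals [1., 1., 1., 0.], so no token
  # attends *to* the padded position.
  return create_attention_mask_from_input_mask(input_ids, input_mask)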
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
    # `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
    # `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer (or a list of such Tensors, one per layer,
    if `do_return_all_layers` is True).
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
  # The Transformer adds residual (sum) connections on all layers, so the input
  # width needs to match the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
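# Illustrative sketch (not part of the original BERT code): running a small
# encoder stack over random embeddings; all sizes below are arbitrary.
def _example_transformer_model():
  input_tensor = tf.random_normal([2, 8, 32])  # [batch_size, seq_length, hidden]
  with tf.variable_scope("example_encoder"):
    output = transformer_model(
        input_tensor=input_tensor,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64)
  return output  # shape [2, 8, 32], the final hidden layer of the stack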
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
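# Illustrative sketch (not part of the original BERT code): for a placeholder
# with an unknown batch dimension,
#   x = tf.placeholder(tf.float32, shape=[None, 128, 768])
#   get_shape_list(x)  # -> [<scalar int32 Tensor>, 128, 768]
# static dimensions come back as Python ints, dynamic ones as tf.Tensor scalars.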
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/bert/modeling.py | modeling.py |
import os
from enum import Enum
class PoolingStrategy(Enum):
NONE = 0
REDUCE_MAX = 1
REDUCE_MEAN = 2
REDUCE_MEAN_MAX = 3
FIRST_TOKEN = 4 # corresponds to [CLS] for single sequences
LAST_TOKEN = 5 # corresponds to [SEP] for single sequences
CLS_TOKEN = 4 # corresponds to the first token for single seq.
SEP_TOKEN = 5 # corresponds to the last token for single seq.
def __str__(self):
return self.name
@staticmethod
def from_string(s):
try:
return PoolingStrategy[s]
except KeyError:
            raise ValueError("Unknown pooling strategy: %s" % s)
# file_path = os.path.dirname(os.path.dirname(__file__))
# file_path = 'D:\pysoftNLP_resources'
# model_dir = os.path.join(file_path, 'chinese_L-12_H-768_A-12')
# print(file_path)
model_dir = r'D:\pysoftNLP_resources\pre_training file\chinese_L-12_H-768_A-12'
print('BERT model directory:', model_dir)
config_name = os.path.join(model_dir, 'bert_config.json')
ckpt_name = os.path.join(model_dir, 'bert_model.ckpt')
vocab_file = os.path.join(model_dir, 'vocab.txt')
# the maximum length of a sequence. Sequences longer than max_seq_len will be truncated on the left side. Thus, if you
# want to send long sequences to the model, please make sure the program can handle them correctly.
#max_seq_len = 5
xla = True
# list of int. This model has 12 layers; by default this program uses the second-to-last layer, because the last layer
# is too close to the target functions used during pre-training. If you want to use the last hidden layer anyway,
# feel free to set layer_indexes=[-1].
layer_indexes = [-2]
#pooling_strategy = PoolingStrategy.NONE
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/bert/args.py | args.py |
# coding:utf-8
import os
import tempfile
import random
import json
import logging
from termcolor import colored
from bert import modeling
from bert import args
from bert.args import PoolingStrategy
import contextlib
def import_tf(device_id=-1, verbose=False):
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' if device_id < 0 else str(device_id)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' if verbose else '3'
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.DEBUG if verbose else tf.logging.ERROR)
return tf
def set_logger(context, verbose=False):
logger = logging.getLogger(context)
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    formatter = logging.Formatter(
        '%(levelname)-.1s:' + context + ':[%(filename).5s:%(funcName).3s:%(lineno)3d]:%(message)s',
        datefmt='%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG if verbose else logging.INFO)
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
return logger
def optimize_graph(logger=None, verbose=False, pooling_strategy=PoolingStrategy.REDUCE_MEAN, max_seq_len=40):
if not logger:
logger = set_logger(colored('BERT_VEC', 'yellow'), verbose)
try:
# we don't need GPU for optimizing the graph
tf = import_tf(device_id=0, verbose=verbose)
from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference
        # allow_soft_placement: automatically choose a device to run on
config = tf.ConfigProto(allow_soft_placement=True)
config_fp = args.config_name
init_checkpoint = args.ckpt_name
logger.info('model config: %s' % config_fp)
        # load the BERT config file
with tf.gfile.GFile(config_fp, 'r') as f:
bert_config = modeling.BertConfig.from_dict(json.load(f))
logger.info('build graph...')
# input placeholders, not sure if they are friendly to XLA
input_ids = tf.placeholder(tf.int32, (None, max_seq_len), 'input_ids')
input_mask = tf.placeholder(tf.int32, (None, max_seq_len), 'input_mask')
input_type_ids = tf.placeholder(tf.int32, (None, max_seq_len), 'input_type_ids')
        # XLA acceleration
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope if args.xla else contextlib.suppress
with jit_scope():
input_tensors = [input_ids, input_mask, input_type_ids]
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids,
use_one_hot_embeddings=False)
            # collect all trainable variables
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
minus_mask = lambda x, m: x - tf.expand_dims(1.0 - m, axis=-1) * 1e30
mul_mask = lambda x, m: x * tf.expand_dims(m, axis=-1)
masked_reduce_max = lambda x, m: tf.reduce_max(minus_mask(x, m), axis=1)
masked_reduce_mean = lambda x, m: tf.reduce_sum(mul_mask(x, m), axis=1) / (
tf.reduce_sum(m, axis=1, keepdims=True) + 1e-10)
            # shared pooling weights
with tf.variable_scope("pooling"):
                # if only one layer is requested, take just that layer's weights
if len(args.layer_indexes) == 1:
encoder_layer = model.all_encoder_layers[args.layer_indexes[0]]
else:
                    # otherwise gather the requested layers and concatenate their weights; shape: 768 * number_of_layers
all_layers = [model.all_encoder_layers[l] for l in args.layer_indexes]
encoder_layer = tf.concat(all_layers, -1)
input_mask = tf.cast(input_mask, tf.float32)
                # the code below builds the sentence vector; it can be viewed as a convolution-like operation (without summing the results) where input_mask acts as the kernel
if pooling_strategy == PoolingStrategy.REDUCE_MEAN:
pooled = masked_reduce_mean(encoder_layer, input_mask)
elif pooling_strategy == PoolingStrategy.REDUCE_MAX:
pooled = masked_reduce_max(encoder_layer, input_mask)
elif pooling_strategy == PoolingStrategy.REDUCE_MEAN_MAX:
pooled = tf.concat([masked_reduce_mean(encoder_layer, input_mask),
masked_reduce_max(encoder_layer, input_mask)], axis=1)
elif pooling_strategy == PoolingStrategy.FIRST_TOKEN or \
pooling_strategy == PoolingStrategy.CLS_TOKEN:
pooled = tf.squeeze(encoder_layer[:, 0:1, :], axis=1)
elif pooling_strategy == PoolingStrategy.LAST_TOKEN or \
pooling_strategy == PoolingStrategy.SEP_TOKEN:
seq_len = tf.cast(tf.reduce_sum(input_mask, axis=1), tf.int32)
rng = tf.range(0, tf.shape(seq_len)[0])
indexes = tf.stack([rng, seq_len - 1], 1)
pooled = tf.gather_nd(encoder_layer, indexes)
elif pooling_strategy == PoolingStrategy.NONE:
pooled = mul_mask(encoder_layer, input_mask)
else:
raise NotImplementedError()
pooled = tf.identity(pooled, 'final_encodes')
output_tensors = [pooled]
tmp_g = tf.get_default_graph().as_graph_def()
with tf.Session(config=config) as sess:
logger.info('load parameters from checkpoint...')
sess.run(tf.global_variables_initializer())
logger.info('freeze...')
tmp_g = tf.graph_util.convert_variables_to_constants(sess, tmp_g, [n.name[:-2] for n in output_tensors])
dtypes = [n.dtype for n in input_tensors]
logger.info('optimize...')
tmp_g = optimize_for_inference(
tmp_g,
[n.name[:-2] for n in input_tensors],
[n.name[:-2] for n in output_tensors],
[dtype.as_datatype_enum for dtype in dtypes],
False)
#tmp_file = tempfile.NamedTemporaryFile('w', delete=True).name
#r = random.randint(1, 1000)
#tmp_file = "./tmp_graph"+str(r)
tmp_file = "./tmp_graph11"
logger.info('write graph to a tmp file: %s' % tmp_file)
with tf.gfile.GFile(tmp_file, 'wb') as f:
f.write(tmp_g.SerializeToString())
return tmp_file
except Exception as e:
logger.error('fail to optimize the graph!')
logger.error(e)
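# Illustrative usage sketch (not part of the original module): freezing the
# graph and loading it back for inference. It assumes the checkpoint/config
# paths in `args.py` exist locally and that TensorFlow 1.x is installed.
def _example_load_frozen_graph(max_seq_len=40):
    tf = import_tf()
    graph_path = optimize_graph(max_seq_len=max_seq_len)
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(graph_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default():
        tf.import_graph_def(graph_def, name='')
        # Feed 'input_ids:0', 'input_mask:0' and 'input_type_ids:0' and fetch
        # 'final_encodes:0' in a tf.Session to obtain sentence vectors.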
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/bert/graph.py | graph.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 125softNLP | /125softNLP-0.0.1-py3-none-any.whl/bert/__init__.py | __init__.py |
# configclasses

[](https://codecov.io/gh/headsrooms/configclasses)
<a href="https://codeclimate.com/github/kingoodie/configclasses/maintainability"><img src="https://api.codeclimate.com/v1/badges/9094f65f5caef64fb993/maintainability" /></a>
[](https://pepy.tech/project/12factor-configclasses)
Like dataclasses but for config.
Specify your config with a class and load it with your env vars or env files.
```python
from typing import Any, Dict, Optional
import httpx
from httpx import Headers
from configclasses import configclass
@configclass
class ClientConfig:
    host: str
    port: int
class UserAPIClient(httpx.AsyncClient):
    def __init__(self, config: ClientConfig, *args, **kwargs):
        self.config = config
        super().__init__(*args, base_url=f"http://{config.host}:{config.port}", **kwargs)
    async def get_users(self, headers: Optional[Headers] = None) -> Dict[str, Any]:
        response = await self.get("/users", headers=headers)
        response.raise_for_status()
        return response.json()
async def main() -> None:
    config = ClientConfig.from_path(".env")
    async with UserAPIClient(config) as client:
        users = await client.get_users()
        print(users)
```
## Features
- Fill your configclasses with existing env vars.
- Define default values in case these variables have no value at all.
- Load your config files in env vars following [12factor apps](https://12factor.net) recommendations.
- Support for _.env_, _yaml_, _toml_, _ini_ and _json_.
- Convert your env vars with specified type in configclass: `int`, `float`, `str` or `bool`.
- Use nested configclasses for more complex configurations.
- Specify a prefix with `@configclass(prefix="<PREFIX>")` to prepend this prefix to your configclass' field names when they are looked up in the environment (see the example after this list).
- Config groups (__TODO__): https://cli.dev/docs/tutorial/config_groups/
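For instance, a prefixed configclass could look like this (an illustrative sketch: `DatabaseConfig` and the `DB_*` variables are made-up names, not part of the library):

```python
import os
from configclasses import configclass
@configclass(prefix="DB")
class DatabaseConfig:
    user: str
    password: str
    url: str
os.environ["DB_USER"] = "admin"
os.environ["DB_PASSWORD"] = "secret"
os.environ["DB_URL"] = "sqlite://:memory:"
db_config = DatabaseConfig.from_environ()  # reads DB_USER, DB_PASSWORD and DB_URL
```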
## Requirements
Python 3.10+
## Installation
Depending on your chosen config file format you can install:
- .env -> ```pip install 12factor-configclasses[dotenv]```
- .yaml -> ```pip install 12factor-configclasses[yaml]```
- .toml -> ```pip install 12factor-configclasses[toml]```
- .ini -> ```pip install 12factor-configclasses```
- .json -> ```pip install 12factor-configclasses```
Or install all supported formats with:
pip install 12factor-configclasses[full]
## Usage
There are three ways to use it:
- Loading an .env file:
```.env
# .env
HOST=0.0.0.0
PORT=8000
DB_URL=sqlite://:memory:
GENERATE_SCHEMAS=True
DEBUG=True
HTTPS_ONLY=False
GZIP=True
SENTRY=False
```
```python
#config.py
from configclasses import configclass
@configclass
class DB:
user: str
password: str
url: str
@configclass
class AppConfig:
host: str
port: int
db: DB
generate_schemas: bool
debug: bool
https_only: bool
gzip: bool
sentry: bool
```
```python
# app.py
from api.config import AppConfig
app_config = AppConfig.from_path(".env")
app = Starlette(debug=app_config.debug)
if app_config.https_only:
app.add_middleware(
HTTPSRedirectMiddleware)
if app_config.gzip:
app.add_middleware(GZipMiddleware)
if app_config.sentry:
app.add_middleware(SentryAsgiMiddleware)
...
register_tortoise(
app,
db_url=app_config.db.url,
modules={"models": ["api.models"]},
generate_schemas=app_config.generate_schemas,
)
if __name__ == "__main__":
uvicorn.run(app, host=app_config.host, port=app_config.port)
```
- Loading predefined environment variables:
The same as before, but instead of:
app_config = AppConfig.from_path(".env")
You will do:
app_config = AppConfig.from_environ()
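The variables must already exist in the process environment in this case; fields that are missing fall back to their defaults. A minimal sketch with a made-up `ServerConfig`:

```python
import os
from configclasses import configclass
@configclass
class ServerConfig:
    host: str
    port: int = 8000  # used when PORT is not set
os.environ["HOST"] = "0.0.0.0"
config = ServerConfig.from_environ()
assert config.host == "0.0.0.0" and config.port == 8000
```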
- Loading a file from a string:
```python
test_env = """HOST=0.0.0.0
PORT=8000
DB_URL=sqlite://:memory:
GENERATE_SCHEMAS=True
DEBUG=True
HTTPS_ONLY=False
GZIP=True
SENTRY=False"""
app_config = AppConfig.from_string(test_env, ".env")
```
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/README.md | README.md |
# -*- coding: utf-8 -*-
from setuptools import setup
packages = \
['configclasses']
package_data = \
{'': ['*']}
extras_require = \
{'dotenv': ['python-dotenv[dotenv]>=0.19,<0.20'],
'full': ['tomlkit[toml]>=0.7,<0.8',
'python-dotenv[dotenv]>=0.19,<0.20',
'pyyaml[yaml]>=6,<7'],
'toml': ['tomlkit[toml]>=0.7,<0.8'],
'yaml': ['pyyaml[yaml]>=6,<7']}
setup_kwargs = {
'name': '12factor-configclasses',
'version': '1.0.0',
'description': 'Like dataclasses but for config.',
'long_description': '# configclasses\n\n\n[](https://codecov.io/gh/headsrooms/configclasses)\n<a href="https://codeclimate.com/github/kingoodie/configclasses/maintainability"><img src="https://api.codeclimate.com/v1/badges/9094f65f5caef64fb993/maintainability" /></a>\n[](https://pepy.tech/project/12factor-configclasses)\n\n\nLike dataclasses but for config.\n\nSpecify your config with a class and load it with your env vars or env files.\n\n\n```python\nimport httpx\nfrom configclasses import configclass\n\n\nclass UserAPIClient(httpx.AsyncClient):\n def __init__(self, config: ClientConfig, *args, **kwargs):\n self.config = config\n super().__init__(*args, **kwargs)\n\n async def get_users(self, headers: Optional[Headers] = None) -> Dict[str, Any]:\n response = await self.get(f"{self.path}/users", auth=headers)\n response.raise_for_status()\n return response.json()\n \n@configclass\nclass ClientConfig:\n host: str\n port: int\n\nconfig = ClientConfig.from_path(".env")\nasync with UserAPIClient(config) as client:\n users = await client.get_users(auth_headers)\n```\n\n## Features\n\n- Fill your configclasses with existent env vars.\n- Define default values in case these variables have no value at all.\n- Load your config files in env vars following [12factor apps](https://12factor.net) recommendations.\n- Support for _.env_, _yaml_, _toml_, _ini_ and _json_.\n- Convert your env vars with specified type in configclass: `int`, `float`, `str` or `bool`.\n- Use nested configclasses to more complex configurations.\n- Specify a prefix with `@configclass(prefix="<PREFIX>")` to append this prefix to your configclass\' attribute names.\n- Config groups (__TODO__): https://cli.dev/docs/tutorial/config_groups/\n\n## Requirements\n\nPython 3.8+\n\n\n## Installation\n\nDepending on your chosen config file format you can install:\n\n- .env -> ```pip install 12factor-configclasses[dotenv]```\n- .yaml -> ```pip install 12factor-configclasses[yaml]```\n- .toml -> ```pip install 12factor-configclasses[toml]```\n- .ini -> ```pip install 12factor-configclasses```\n- .json -> ```pip install 12factor-configclasses```\n\nOr install all supported formats with:\n\n pip install 12factor-configclasses[full]\n \n## Usage\n\nThere are three ways to use it:\n\n- Loading an .env file:\n\n```.env\n# .env\nHOST=0.0.0.0\nPORT=8000\nDB_URL=sqlite://:memory:\nGENERATE_SCHEMAS=True\nDEBUG=True\nHTTPS_ONLY=False\nGZIP=True\nSENTRY=False\n```\n\n```python\n#config.py\nfrom configclasses import configclass\n\n\n@configclass\nclass DB:\n user: str\n password: str\n url: str\n\n\n@configclass\nclass AppConfig:\n host: str\n port: int\n db: DB\n generate_schemas: bool\n debug: bool\n https_only: bool\n gzip: bool\n sentry: bool\n```\n\n```python\n# app.py\nfrom api.config import AppConfig\n\napp_config = AppConfig.from_path(".env")\napp = Starlette(debug=app_config.debug)\n\nif app_config.https_only:\n app.add_middleware(\n HTTPSRedirectMiddleware)\nif app_config.gzip:\n app.add_middleware(GZipMiddleware)\nif app_config.sentry:\n app.add_middleware(SentryAsgiMiddleware)\n\n...\n\nregister_tortoise(\n app,\n db_url=app_config.db.url,\n modules={"models": ["api.models"]},\n generate_schemas=app_config.generate_schemas,\n)\n\nif __name__ == "__main__":\n uvicorn.run(app, host=app_config.host, port=app_config.port)\n```\n\n \n- Loading predefined environmental variables:\n\nThe same than before, but instead of:\n\n app_config = AppConfig.from_path(".env")\n \nYou will do:\n\n app_config = AppConfig.from_environ()\n \n- Loading a file from 
a string:\n\n```python\ntest_env = """HOST=0.0.0.0\nPORT=8000\nDB_URL=sqlite://:memory:\nGENERATE_SCHEMAS=True\nDEBUG=True\nHTTPS_ONLY=False\nGZIP=True\nSENTRY=False"""\napp_config = AppConfig.from_string(test_env, ".env")\n```\n',
'author': 'Pablo Cabezas',
'author_email': 'pabcabsal@gmail.com',
'maintainer': None,
'maintainer_email': None,
'url': 'https://github.com/kingoodie/configclasses',
'packages': packages,
'package_data': package_data,
'extras_require': extras_require,
'python_requires': '>=3.10,<4.0',
}
setup(**setup_kwargs)
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/setup.py | setup.py |
import json
from dataclasses import asdict, _MISSING_TYPE
from os import PathLike
def dump_env_key(key, value):
try:
lines = [
f"{dump_env_key(key + '_' + sub_key, sub_value)}"
for sub_key, sub_value in value.__dict__.items()
]
lines = [line for line in lines if line]
return "\n".join(lines)
except AttributeError:
return f"{key}={value}\n"
def dump_env(obj, path: PathLike):
lines = [f"{dump_env_key(key, value)}" for key, value in obj.__dict__.items()]
with open(path, "w") as file:
file.writelines(lines)
def dump_toml(obj, path):
raise NotImplementedError
def dump_yaml(obj, path):
raise NotImplementedError
def dump_ini(obj, path):
raise NotImplementedError
def dump_json_key(value):
try:
return {
key: sub_value
for key, sub_value in asdict(value).items()
if not isinstance(sub_value, _MISSING_TYPE)
}
except TypeError:
return value
def dump_json(obj, path: PathLike):
output = {key: dump_json_key(value) for key, value in obj.__dict__.items()}
with open(path, "w") as file:
json.dump(output, file)
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/configclasses/dumpers.py | dumpers.py |
from configclasses.dumpers import dump_env, dump_toml, dump_yaml, dump_ini, dump_json
from configclasses.exceptions import ConfigFilePathDoesNotExist, NonSupportedExtension
from configclasses.helpers import fill_init_dict, supported_extensions
from configclasses.loaders import (
load_env,
load_toml,
load_yaml,
load_ini,
load_json,
)
from dataclasses import _process_class, fields
from os import PathLike
from pathlib import Path
from typing import Dict, Optional
def configclass(
cls=None,
/,
*,
prefix: Optional[str] = None,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
match_args=True,
kw_only=False,
slots=False,
):
"""Same behaviour that dataclass with additional classmethods as dataclass initializers:
from_environ and from_path"""
def wrap(cls):
return _post_process_class(
_process_class(
cls,
init,
repr,
eq,
order,
unsafe_hash,
frozen,
match_args,
kw_only,
slots,
),
prefix,
)
# See if we're being called as @configclass or @configclass().
if cls is None:
# We're called with parens.
return wrap
    # We're called as @configclass without parens.
return wrap(cls)
def _post_process_class(the_class, the_prefix: Optional[str]):
def from_environ(
cls, defaults: Dict[str, str] = None, parent_field_name: Optional[str] = None
):
fields_tuple = [
(field.name, field.type, field.default) for field in fields(cls)
]
init_dict = fill_init_dict(
fields_tuple, defaults, parent_field_name, the_prefix
)
return cls(**init_dict)
def from_path(cls, config_path: str, defaults: Dict[str, str] = None):
path_to_env(Path(config_path))
return cls.from_environ(defaults)
def from_string(cls, string: str, extension: str, defaults: Dict[str, str] = None):
load_file(string=string, extension=extension)
return cls.from_environ(defaults)
the_class.from_environ = classmethod(from_environ)
the_class.from_path = classmethod(from_path)
the_class.from_string = classmethod(from_string)
return the_class
def path_to_env(path: Path):
"""Given a path it loads into os.environ all config files found in this path."""
if not path.exists():
raise ConfigFilePathDoesNotExist(
f"Config file path '{str(path)}' does not exist"
)
if path.is_file():
load_file(path)
else:
load_path(path)
def file_to_env(
extension: str, path: Optional[Path] = None, string: Optional[str] = None
):
if extension == ".env":
load_env(path, string)
elif extension == ".toml":
load_toml(path, string)
elif extension in (".yaml", ".yml"):
load_yaml(path, string)
elif extension in (".ini", ".cfg"):
load_ini(path, string)
elif extension == ".json":
load_json(path, string)
def load_path(path: Path):
for x in path.iterdir():
path_to_env(x)
def load_file(
path: Optional[Path] = None,
string: Optional[str] = None,
extension: Optional[str] = None,
):
extension = path.suffix or path.name if path else extension
if extension in supported_extensions:
file_to_env(extension, path, string)
else:
raise NonSupportedExtension(f"Extension '{extension}' not supported")
def dump(
obj,
path: Optional[PathLike] = None,
extension: Optional[str] = None,
):
path = Path(path) if path else None
extension = path.suffix or path.name if path else extension
if extension in supported_extensions:
if extension == ".env":
dump_env(obj, path)
elif extension == ".toml":
dump_toml(obj, path)
elif extension in (".yaml", ".yml"):
dump_yaml(obj, path)
elif extension in (".ini", ".cfg"):
dump_ini(obj, path)
elif extension == ".json":
dump_json(obj, path)
else:
raise NonSupportedExtension(f"Extension '{extension}' not supported")
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/configclasses/configclasses.py | configclasses.py |
import configparser
import os
from io import StringIO
from json import loads
from pathlib import Path
from typing import Dict, Optional
from configclasses.exceptions import DependencyNotInstalled
from configclasses.helpers import normalize_field_name
def load_env(path: Optional[Path] = None, string: Optional[str] = None):
try:
from dotenv import load_dotenv
except ImportError:
raise DependencyNotInstalled("You must install 'python-dotenv'")
if path:
load_dotenv(dotenv_path=path)
else:
load_dotenv(stream=StringIO(string))
def load_dict(dict: Dict[str, str]):
for k, v in dict.items():
if isinstance(v, Dict):
inner_dict = {
f"{k}_{inner_key}": inner_value for inner_key, inner_value in v.items()
}
load_dict(inner_dict)
continue
os.environ[normalize_field_name(k)] = str(v)
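# Example of the flattening above (illustrative): load_dict({"db": {"url": "x"}})
# sets the single environment variable "db_url" to "x", which a nested
# configclass field `db.url` then finds under the name "DB_URL"/"db_url".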
def load_toml(path: Optional[Path] = None, string: Optional[str] = None):
try:
from tomlkit import parse
except ImportError:
raise DependencyNotInstalled("You must install 'tomlkit'")
if path:
with path.open("r") as config_file:
cfg = parse(config_file.read())
else:
cfg = parse(string)
load_dict(cfg)
def load_yaml(path: Optional[Path] = None, string: Optional[str] = None):
try:
from yaml import full_load
except ImportError:
raise DependencyNotInstalled("You must install pyyaml")
if path:
with path.open("r") as config_file:
cfg = full_load(config_file.read())
else:
cfg = full_load(string)
load_dict(cfg)
def load_ini(path: Optional[Path] = None, string: Optional[str] = None):
cfg = configparser.ConfigParser()
cfg.read(path) if path else cfg.read_string(string)
load_dict(cfg.__dict__["_sections"])
def load_json(path: Optional[Path] = None, string: Optional[str] = None):
if path:
with path.open("r") as config_file:
cfg = loads(config_file.read())
else:
cfg = loads(string)
load_dict(cfg)
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/configclasses/loaders.py | loaders.py |
import os
from dataclasses import is_dataclass
from typing import Any, Dict, List, Tuple, Optional
supported_extensions = (".env", ".toml", ".yaml", ".yml", ".ini", ".cfg", ".json")
converter_types = (int, float)
def get_field_value_from_environ(field_name: Any):
return os.environ.get(str.upper(field_name)) or os.environ.get(field_name)
def get_default_value(field_name: Any, defaults: Dict[str, str]) -> Optional[str]:
if defaults:
return defaults.get(str.upper(field_name)) or defaults.get(field_name)
def fill_init_dict(
class_fields: List[Tuple[Any, Any, Any]], defaults, parent_field_name, prefix
):
init_dict = {}
for class_field_name, class_field_type, class_field_default in class_fields:
origin_field_name = get_origin_field_name(
class_field_name, parent_field_name, prefix
)
if is_dataclass(class_field_type):
init_dict[class_field_name] = class_field_type.from_environ(
defaults, origin_field_name
)
elif field_value := get_field_value_from_environ(
origin_field_name
) or get_default_value(origin_field_name, defaults):
fill_with_environ_or_provided_defaults(
class_field_name, class_field_type, field_value, init_dict
)
else:
init_dict[class_field_name] = class_field_default
return init_dict
def fill_with_environ_or_provided_defaults(
class_field_name, class_field_type, field_value, init_dict
):
if class_field_type in converter_types:
init_dict[class_field_name] = class_field_type(field_value)
elif class_field_type == bool:
init_dict[class_field_name] = field_value in ("True", "true")
else:
init_dict[class_field_name] = field_value
def get_origin_field_name(class_field_name, parent_field_name, prefix):
if not prefix and not parent_field_name:
origin_field_name = class_field_name
elif parent_field_name:
origin_field_name = f"{parent_field_name}_{class_field_name}"
elif prefix:
origin_field_name = f"{prefix}_{class_field_name}"
else:
origin_field_name = f"{prefix}_{parent_field_name}_{class_field_name}"
return origin_field_name
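# Examples of the resolution above (illustrative):
#   prefix=None,  parent=None, field "host" -> looks up HOST (or "host")
#   prefix="APP", parent=None, field "host" -> looks up APP_HOST (or "APP_host")
#   prefix=None,  parent="db", field "url"  -> looks up DB_URL (or "db_url")
# Note that when both a prefix and a parent name are given, the parent branch
# takes precedence as written, so the prefix is ignored for nested fields.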
def normalize_field_name(field_name: str):
return str.lower(str(field_name))
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/configclasses/helpers.py | helpers.py |
from configclasses.configclasses import configclass
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/configclasses/__init__.py | __init__.py |
class NonSupportedExtension(Exception):
pass
class ConfigFilePathDoesNotExist(Exception):
pass
class DependencyNotInstalled(Exception):
pass
| 12factor-configclasses | /12factor-configclasses-1.0.0.tar.gz/12factor-configclasses-1.0.0/configclasses/exceptions.py | exceptions.py |
import datetime
import logging
import os
from typing import Dict, Tuple, Union, Any, TypeVar, Type
import hvac
from django.apps.config import AppConfig
from django.db.backends.base.base import BaseDatabaseWrapper
from requests.exceptions import RequestException
_log = logging.getLogger(__name__)
class VaultCredentialProviderException(Exception):
pass
class VaultAuthentication:
"""
The basic interface expected by `VaultCredentialProvider`. Most implementations will want to go
with `BaseVaultAuthenticator`.
"""
def authenticated_client(self, *args: Any, **kwargs: Any) -> hvac.Client:
"""
:param args: must be passed on to `hvac.Client`
:param kwargs: must be passed on to `hvac.Client`
:return: A `hvac.Client` instance which is authenticated with Vault
"""
raise NotImplementedError("Subclasses of VaultAuthentication must implement authenticated_client")
# TypeVar for the factory methods in BaseVaultAuthenticator
T = TypeVar('T', bound='BaseVaultAuthenticator')
class BaseVaultAuthenticator(VaultAuthentication):
"""
Use one of the factory methods (`app_id`, `token`, `ssl_client_cert`) to create an instance.
"""
def __init__(self) -> None:
self.credentials = None # type: Union[str, Tuple[str, str]]
self.authtype = None # type: str
self.authmount = None # type: str
self.unwrap_response = False
super().__init__()
@classmethod
def app_id(cls: Type[T], app_id: str, user_id: str) -> T:
i = cls()
i.credentials = (app_id, user_id)
i.authtype = "app-id"
return i
@classmethod
def approle(cls: Type[T], role_id: str, secret_id: str=None, mountpoint: str="approle") -> T:
i = cls()
i.credentials = (role_id, secret_id)
i.authmount = mountpoint
i.authtype = "approle"
return i
@classmethod
def ssl_client_cert(cls: Type[T], certfile: str, keyfile: str) -> T:
if not os.path.isfile(certfile) or not os.access(certfile, os.R_OK):
raise VaultCredentialProviderException("File not found or not readable: %s" % certfile)
if not os.path.isfile(keyfile) or not os.access(keyfile, os.R_OK):
raise VaultCredentialProviderException("File not found or not readable: %s" % keyfile)
i = cls()
i.credentials = (certfile, keyfile)
i.authtype = "ssl"
return i
@classmethod
def token(cls: Type[T], token: str, authtype: str = "token") -> T:
"""
This method can be used to effect many authentication adapters, like
token authenticaation and GitHub
"""
i = cls()
i.credentials = token
i.authtype = authtype
return i
@classmethod
def username_and_password(cls: Type[T], username: str, password: str, authtype: str = "ldap") -> T:
"""
        This method can be used for many authentication adapters, such as Okta, LDAP, etc.
"""
i = cls()
i.credentials = (username, password)
i.authtype = authtype
return i
@classmethod
def role_and_jwt(cls: Type[T], role: str, jwt: str, authtype: str = "jwt") -> T:
"""
        This method can be used to configure many authentication adapters, such as
        Kubernetes, Azure, GCP, and JWT/OIDC.
"""
i = cls()
i.credentials = (role, jwt)
i.authtype = authtype
return i
def authenticated_client(self, *args: Any, **kwargs: Any) -> hvac.Client:
if self.authtype == "token":
cl = hvac.Client(token=self.credentials, *args, **kwargs)
elif self.authtype == "app-id":
cl = hvac.Client(*args, **kwargs)
cl.auth_app_id(*self.credentials)
elif self.authtype == "ssl":
cl = hvac.Client(cert=self.credentials, *args, **kwargs)
cl.auth.tls.login()
else:
cl = hvac.Client(*args, **kwargs)
try:
auth_adapter = getattr(cl.auth, self.authtype)
except AttributeError:
raise VaultCredentialProviderException("unknown auth method %s" % self.authtype)
auth_adapter.login(*self.credentials, mount_point=self.authmount)
if not cl.is_authenticated():
raise VaultCredentialProviderException("Unable to authenticate Vault client using provided credentials "
"(type=%s)" % self.authtype)
return cl
class VaultAuth12Factor(BaseVaultAuthenticator):
"""
This class configures a Vault client instance from environment variables. The environment variables supported are:
============================ ========================= ==================================
Environment Variable Vault auth backend Direct configuration static method
============================ ========================= ==================================
VAULT_TOKEN Token authentication token(str)
    VAULT_APPID, VAULT_USERID    App-id authentication     app_id(str, str)
VAULT_ROLEID, VAULT_SECRETID Approle authentication approle(str, str, str, bool)
VAULT_SSLCERT, VAULT_SSLKEY SSL Client authentication ssl_client_cert(str, str)
============================ ========================= ==================================
It can also be configured directly by calling one of the direct configuration methods.
"""
def __init__(self) -> None:
super().__init__()
@staticmethod
def has_envconfig() -> bool:
"""
(static)
:return: True if enough information is available in the environment to authenticate to Vault
"""
if (os.getenv("VAULT_TOKEN", None) or
(os.getenv("VAULT_APPID", None) and os.getenv("VAULT_USERID", None)) or
(os.getenv("VAULT_SSLCERT", None) and os.getenv("VAULT_SSLKEY", None)) or
(os.getenv("VAULT_ROLEID", None) and os.getenv("VAULT_SECRETID", None))):
return True
return False
@staticmethod
def fromenv() -> 'VaultAuth12Factor':
"""
:return: Load configuration from the environment and return a configured instance
"""
i = None # type: VaultAuth12Factor
if os.getenv("VAULT_TOKEN", None):
i = VaultAuth12Factor.token(os.getenv("VAULT_TOKEN"))
elif os.getenv("VAULT_APPID", None) and os.getenv("VAULT_USERID", None):
i = VaultAuth12Factor.app_id(os.getenv("VAULT_APPID"), os.getenv("VAULT_USERID"))
elif os.getenv("VAULT_ROLEID", None) and os.getenv("VAULT_SECRETID", None):
i = VaultAuth12Factor.approle(os.getenv("VAULT_ROLEID"), os.getenv("VAULT_SECRETID"))
elif os.getenv("VAULT_SSLCERT", None) and os.getenv("VAULT_SSLKEY", None):
i = VaultAuth12Factor.ssl_client_cert(os.getenv("VAULT_SSLCERT"), os.getenv("VAULT_SSLKEY"))
if i:
e = os.getenv("VAULT_UNWRAP", "False")
if e.lower() in ["true", "1", "yes"]:
i.unwrap_response = True
return i
raise VaultCredentialProviderException("Unable to configure Vault authentication from the environment")
class VaultCredentialProvider:
"""
The `VaultCredentialProvider` uses credentials from a `VaultAuthentication` implementation to connect to
Vault and read credentials from `secretpath`. It then provides `username` and `password` as properties while
managing the lease and renewing the credentials as needed.
This class also optionally enforces connection security through `pin_cacert`.
You can use this in a Django `settings.DATABASES` `dict` like this:
.. code-block:: python
VAULT = VaultAuth12Factor.fromenv()
CREDS = VaultCredentialProvider("https://vault.local:8200/", VAULT,
os.getenv("VAULT_DATABASE_PATH", "db-mydatabase/creds/fullaccess"),
os.getenv("VAULT_CA", None), True,
DEBUG)
DATABASES = {
'default': DjangoAutoRefreshDBCredentialsDict(CREDS, {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv("DATABASE_NAME", "mydatabase"),
'USER': CREDS.username,
'PASSWORD': CREDS.password,
'HOST': '127.0.0.1',
'PORT': '5432',
'SET_ROLE': os.getenv("DATABASE_PARENTROLE", "mydatabaseowner") # requires django-postgresql-setrole
}),
}
"""
def __init__(self, vaulturl: str, vaultauth: VaultAuthentication, secretpath: str, pin_cacert: str=None,
ssl_verify: bool=False, debug_output: bool=False) -> None:
self.vaulturl = vaulturl
self._vaultauth = vaultauth
self.secretpath = secretpath
self.pin_cacert = pin_cacert
self.ssl_verify = ssl_verify
self.debug_output = debug_output
self._cache = None # type: Dict[str, str]
self._leasetime = None # type: datetime.datetime
self._updatetime = None # type: datetime.datetime
self._lease_id = None # type: str
def _now(self) -> datetime.datetime:
return datetime.datetime.now()
def _refresh(self) -> None:
vcl = self._vaultauth.authenticated_client(
url=self.vaulturl,
verify=self.pin_cacert if self.pin_cacert else self.ssl_verify
)
try:
result = vcl.read(self.secretpath)
except RequestException as e:
raise VaultCredentialProviderException(
"Unable to read credentials from path '%s' with request error: %s" %
(self.secretpath, str(e))
) from e
if "data" not in result or "username" not in result["data"] or "password" not in result["data"]:
raise VaultCredentialProviderException(
"Read dict from Vault path %s did not match expected structure (data->{username, password}): %s" %
(self.secretpath, str(result))
)
self._cache = result["data"]
self._lease_id = result["lease_id"]
self._leasetime = self._now()
self._updatetime = self._leasetime + datetime.timedelta(seconds=int(result["lease_duration"]))
_log.debug("Loaded new Vault DB credentials from %s:\nlease_id=%s\nleasetime=%s\nduration=%s\n"
"username=%s\npassword=%s",
self.secretpath,
self._lease_id, str(self._leasetime), result["lease_duration"], self._cache["username"],
self._cache["password"] if self.debug_output else "Password withheld, debug output is disabled")
def _get_or_update(self, key: str) -> str:
if self._cache is None or (self._updatetime - self._now()).total_seconds() < 10:
            # if we have less than 10 seconds left on the lease, or no lease at all, get new credentials
_log.info("Vault DB credential lease has expired, refreshing for %s" % key)
self._refresh()
_log.info("refresh done (%s, %s)" % (self._lease_id, str(self._updatetime)))
return self._cache[key]
@property
def username(self) -> str:
return self._get_or_update("username")
@property
def password(self) -> str:
return self._get_or_update("password")
class DjangoAutoRefreshDBCredentialsDict(dict):
def __init__(self, provider: VaultCredentialProvider, *args: Any, **kwargs: Any) -> None:
self._provider = provider
super().__init__(*args, **kwargs)
def refresh_credentials(self) -> None:
self["USER"] = self._provider.username
self["PASSWORD"] = self._provider.password
def __str__(self) -> str:
return "DjangoAutoRefreshDBCredentialsDict(%s)" % super().__str__()
def __repr__(self) -> str:
return "DjangoAutoRefreshDBCredentialsDict(%s)" % super().__repr__()
def refresh_credentials_hook(sender: type, *, dbwrapper: BaseDatabaseWrapper, **kwargs: Any) -> None:
# settings_dict will be the dictionary from the database connection
# so this supports multiple databases in settings.py
if isinstance(dbwrapper.settings_dict, DjangoAutoRefreshDBCredentialsDict):
dbwrapper.settings_dict.refresh_credentials()
class DjangoIntegration(AppConfig):
name = "vault12factor"
def ready(self) -> None:
from django_dbconn_retry import pre_reconnect
pre_reconnect.connect(refresh_credentials_hook)
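
# A wiring sketch (an assumption inferred from the names in this module and its
# __init__.py, not copied from the project's documentation): the app is enabled by
# adding it to INSTALLED_APPS, and DjangoIntegration.ready() then connects
# refresh_credentials_hook to the pre_reconnect signal provided by django_dbconn_retry.
#
#   INSTALLED_APPS = [
#       # ...
#       'vault12factor',
#   ]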
| 12factor-vault | /12factor_vault-0.1.23-py3-none-any.whl/vault12factor/apps.py | apps.py |
import django
from .apps import *
if django.VERSION < (3, 2):
default_app_config = 'vault12factor.DjangoIntegration'
| 12factor-vault | /12factor_vault-0.1.23-py3-none-any.whl/vault12factor/__init__.py | __init__.py |
def print_lol(the_list):
for each_item in the_list:
if isinstance(each_item, list):
print_lol(each_item)
else:
print(each_item) | 131228_pytest_1 | /131228_pytest_1-1.0.0.tar.gz/131228_pytest_1-1.0.0/nester.py | nester.py |
from distutils.core import setup
setup(
name='131228_pytest_1',
version='1.0.0',
packages=[''],
url='none',
license='none',
author='smith9',
author_email='smith9@ms22.hinet.net',
description='A easy printer of nested list'
)
| 131228_pytest_1 | /131228_pytest_1-1.0.0.tar.gz/131228_pytest_1-1.0.0/setup.py | setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
from setuptools import setup, find_packages
setup(
name='1337',
version='1.0.0',
description='This is so 1337!',
long_description='Run\n\n::\n\n $ 1337\n\n...to be 1337.',
packages=find_packages(),
entry_points={
'console_scripts': [
'1337 = 1337.1337:main',
]
}
)
| 1337 | /1337-1.0.0.tar.gz/1337-1.0.0/setup.py | setup.py |
import requests
import requests_cache
from py1337x import parser
class py1337x():
def __init__(self, proxy=None, cookie=None, cache=None, cacheTime=86400, backend='sqlite'):
self.baseUrl = f'https://www.{proxy}' if proxy else 'https://www.1377x.to'
self.headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'accept-language': 'en-US,en;q=0.5',
'upgrade-insecure-requests': '1',
'te': 'trailers'
}
if cookie:
self.headers['cookie'] = f'cf_clearance={cookie}'
self.requests = requests_cache.CachedSession(cache, expire_after=cacheTime, backend=backend) if cache else requests
#: Searching torrents
def search(self, query, page=1, category=None, sortBy=None, order='desc'):
query = '+'.join(query.split())
category = category.upper() if category and category.lower() in ['xxx', 'tv'] else category.capitalize() if category else None
url = f"{self.baseUrl}/{'sort-' if sortBy else ''}{'category-' if category else ''}search/{query}/{category+'/' if category else ''}{sortBy.lower()+'/' if sortBy else ''}{order.lower()+'/' if sortBy else ''}{page}/"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl, page=page)
#: Trending torrents
def trending(self, category=None, week=False):
url = f"{self.baseUrl}/trending{'-week' if week and not category else ''}{'/w/'+category.lower()+'/' if week and category else '/d/'+category.lower()+'/' if not week and category else ''}"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl)
#: Top 100 torrents
def top(self, category=None):
category = 'applications' if category and category.lower() == 'apps' else 'television' if category and category.lower() == 'tv' else category.lower() if category else None
url = f"{self.baseUrl}/top-100{'-'+category if category else ''}"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl)
#: Popular torrents
def popular(self, category, week=False):
url = f"{self.baseUrl}/popular-{category.lower()}{'-week' if week else ''}"
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl)
#: Browse torrents by category type
def browse(self, category, page=1):
category = category.upper() if category.lower() in ['xxx', 'tv'] else category.capitalize()
url = f'{self.baseUrl}/cat/{category}/{page}/'
response = self.requests.get(url, headers=self.headers)
return parser.torrentParser(response, baseUrl=self.baseUrl, page=page)
#: Info of torrent
def info(self, link=None, torrentId=None):
if not link and not torrentId:
raise TypeError('Missing 1 required positional argument: link or torrentId')
elif link and torrentId:
raise TypeError('Got an unexpected argument: Pass either link or torrentId')
link = f'{self.baseUrl}/torrent/{torrentId}/h9/' if torrentId else link
response = self.requests.get(link, headers=self.headers)
return parser.infoParser(response, baseUrl=self.baseUrl)
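
# Usage sketch (the query, category and sort values are arbitrary examples, not
# fixtures of this library):
#
#   torrents = py1337x()
#   results = torrents.search('linux iso', page=1, category='apps', sortBy='seeders')
#   first = results['items'][0]
#   details = torrents.info(torrentId=first['torrentId'])
#   print(details['magnetLink'])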
| 1337x | /1337x-1.2.4-py3-none-any.whl/py1337x/py1337x.py | py1337x.py |
from bs4 import BeautifulSoup
def torrentParser(response, baseUrl, page=1):
soup = BeautifulSoup(response.content, 'html.parser')
torrentList = soup.select('a[href*="/torrent/"]')
seedersList = soup.select('td.coll-2')
leechersList = soup.select('td.coll-3')
sizeList = soup.select('td.coll-4')
timeList = soup.select('td.coll-date')
uploaderList = soup.select('td.coll-5')
lastPage = soup.find('div', {'class': 'pagination'})
if not lastPage:
pageCount = page
else:
try:
pageCount = int(lastPage.findAll('a')[-1]['href'].split('/')[-2])
except Exception:
pageCount = page
results = {
'items': [],
'currentPage': page or 1,
'itemCount': len(torrentList),
'pageCount': pageCount
}
if torrentList:
for count, torrent in enumerate(torrentList):
name = torrent.getText().strip()
torrentId = torrent['href'].split('/')[2]
link = baseUrl+torrent['href']
seeders = seedersList[count].getText()
leechers = leechersList[count].getText()
size = sizeList[count].contents[0]
time = timeList[count].getText()
uploader = uploaderList[count].getText().strip()
uploaderLink = baseUrl+'/'+uploader+'/'
results['items'].append({
'name': name,
'torrentId': torrentId,
'link': link,
'seeders': seeders,
'leechers': leechers,
'size': size,
'time': time,
'uploader': uploader,
'uploaderLink': uploaderLink
})
return results
def infoParser(response, baseUrl):
soup = BeautifulSoup(response.content, 'html.parser')
name = soup.find('div', {'class': 'box-info-heading clearfix'})
name = name.text.strip() if name else None
shortName = soup.find('div', {'class': 'torrent-detail-info'})
shortName = shortName.find('h3').getText().strip() if shortName else None
description = soup.find('div', {'class': 'torrent-detail-info'})
description = description.find('p').getText().strip() if description else None
genre = soup.find('div', {'class': 'torrent-category clearfix'})
genre = [i.text.strip() for i in genre.find_all('span')] if genre else None
thumbnail = soup.find('div', {'class': 'torrent-image'})
thumbnail = thumbnail.find('img')['src'] if thumbnail else None
if thumbnail and not thumbnail.startswith('http'):
if thumbnail.startswith('//'):
thumbnail = 'https:'+thumbnail
else:
thumbnail = baseUrl+thumbnail
magnetLink = soup.select('a[href^="magnet"]')
magnetLink = magnetLink[0]['href'] if magnetLink else None
infoHash = soup.find('div', {'class': 'infohash-box'})
infoHash = infoHash.find('span').getText() if infoHash else None
images = soup.find('div', {'class': 'tab-pane active'})
images = [i['src'] for i in images.find_all('img')] if images else None
descriptionList = soup.find_all('ul', {'class': 'list'})
if len(descriptionList) > 2:
firstList = descriptionList[1].find_all('li')
secondList = descriptionList[2].find_all('li')
category = firstList[0].find('span').getText()
species = firstList[1].find('span').getText()
language = firstList[2].find('span').getText()
size = firstList[3].find('span').getText()
uploader = firstList[4].find('span').getText().strip()
uploaderLink = baseUrl+'/'+uploader+'/'
downloads = secondList[0].find('span').getText()
lastChecked = secondList[1].find('span').getText()
uploadDate = secondList[2].find('span').getText()
seeders = secondList[3].find('span').getText()
leechers = secondList[4].find('span').getText()
else:
category = species = language = size = uploader = uploaderLink = downloads = lastChecked = uploadDate = seeders = leechers = None
return {
'name': name,
'shortName': shortName,
'description': description,
'category': category,
'type': species,
'genre': genre,
'language': language,
'size': size,
'thumbnail': thumbnail,
'images': images if images else None,
'uploader': uploader,
'uploaderLink': uploaderLink,
'downloads': downloads,
'lastChecked': lastChecked,
'uploadDate': uploadDate,
'seeders': seeders,
'leechers': leechers,
'magnetLink': magnetLink,
'infoHash': infoHash.strip() if infoHash else None
} | 1337x | /1337x-1.2.4-py3-none-any.whl/py1337x/parser.py | parser.py |
from py1337x.py1337x import py1337x | 1337x | /1337x-1.2.4-py3-none-any.whl/py1337x/__init__.py | __init__.py |
153957 theme
============
`View demo album here <https://153957.github.io/153957-theme/>`_
Photo gallery template
----------------------
Web photo gallery templates adapted to my personal preferences.
Usage
-----
This section describes how to install and use this theme.
Installation
~~~~~~~~~~~~
Install the ``153957-theme`` package::
$ pip install 153957-theme
Configure
~~~~~~~~~
In the ``sigal.conf.py`` configuration for an album, the ``theme`` setting should be
a path to a theme directory. However, since this theme is distributed as a Python
package, its location is not immediately obvious. Two options are available:
enable the theme as a plugin, or obtain the path by importing the package.
When the theme is enabled as a plugin, it is set automatically.
Set ``theme`` to an empty string and add the theme and menu plugins::
theme = ''
plugins = ['153957_theme.theme', '153957_theme.full_menu', …]
The alternative is to resolve the theme path yourself; because the package name starts
with a digit it cannot be imported with a plain ``import`` statement, so use
``importlib``::

    import importlib

    theme = importlib.import_module('153957_theme.theme').get_path()
    plugins = ['153957_theme.full_menu', …]
Sources
-------
Based on the `sigal <https://sigal.saimon.org/>`_ version of the Galleria theme, which is
distributed under the MIT License.
The theme itself is based on `Galleria Classic <https://github.com/GalleriaJS/galleria/>`_,
which is also distributed under the MIT License.
| 153957-theme | /153957-theme-2.tar.gz/153957-theme-2/README.rst | README.rst |
from setuptools import setup
setup()
| 153957-theme | /153957-theme-2.tar.gz/153957-theme-2/setup.py | setup.py |
"""Use the 153957-theme as theme for the gallery"""
from pathlib import Path
from sigal import signals
def get_path():
return str(Path(__file__).resolve().parent)
def theme(gallery):
"""Set theme settings to this theme"""
gallery.settings['theme'] = get_path()
def register(settings):
signals.gallery_initialized.connect(theme)
| 153957-theme | /153957-theme-2.tar.gz/153957-theme-2/153957_theme/theme.py | theme.py |
"""Add full menu to gallery
Limitations:
- Currently only supports sorting albums by name in normal order (can not be reversed).
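
The resulting ``gallery.full_tree`` is a nested mapping keyed by album name. A sketch of
its shape, with hypothetical album names::

    {
        '2020': {
            'self': <Album '2020'>,
            'subalbums': {
                'roadtrip': {'self': <Album '2020/roadtrip'>, 'subalbums': {}},
            },
        },
    }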
"""
import operator
import os
from sigal import signals
def full_tree(gallery):
"""full menu tree"""
sorted_tree = sorted(gallery.albums.items(), key=operator.itemgetter(0))
gallery.full_tree = dict()
for name, album in sorted_tree:
if name == '.':
continue
ancestors = album.path.split('/')[:-1]
current_ancestor = gallery.full_tree
for ancestor in ancestors:
current_ancestor = current_ancestor[ancestor]['subalbums']
current_ancestor[album.name] = {
'self': album,
'subalbums': dict()
}
def path_to_root(album):
"""url path back to gallery root"""
path_to_root = os.path.relpath('.', album.path)
if path_to_root == '.':
path_to_root = ''
else:
path_to_root += '/'
album.path_to_root = path_to_root
def path_from_root(album):
"""url from gallery root"""
album.path_from_root = album.path
def register(settings):
signals.gallery_initialized.connect(full_tree)
signals.album_initialized.connect(path_to_root)
signals.album_initialized.connect(path_from_root)
| 153957-theme | /153957-theme-2.tar.gz/153957-theme-2/153957_theme/full_menu.py | full_menu.py |
/*! iFrame Resizer (iframeSizer.contentWindow.min.js) - v4.3.1 - 2021-01-11
* Desc: Include this file in any page being loaded into an iframe
* to force the iframe to resize to the content size.
* Requires: iframeResizer.min.js on host page.
* Copyright: (c) 2021 David J. Bradshaw - dave@bradshaw.net
* License: MIT
*/
!function(u){if("undefined"!=typeof window){var n=!0,o=10,i="",r=0,a="",t=null,c="",s=!1,d={resize:1,click:1},l=128,f=!0,m=1,h="bodyOffset",g=h,p=!0,v="",y={},w=32,b=null,T=!1,E=!1,O="[iFrameSizer]",S=O.length,M="",I={max:1,min:1,bodyScroll:1,documentElementScroll:1},N="child",A=!0,C=window.parent,z="*",k=0,R=!1,e=null,x=16,L=1,F="scroll",P=F,D=window,j=function(){ae("onMessage function not defined")},q=function(){},H=function(){},W={height:function(){return ae("Custom height calculation function not defined"),document.documentElement.offsetHeight},width:function(){return ae("Custom width calculation function not defined"),document.body.scrollWidth}},B={},J=!1;try{var U=Object.create({},{passive:{get:function(){J=!0}}});window.addEventListener("test",te,U),window.removeEventListener("test",te,U)}catch(e){}var V,X,Y,K,Q,G,Z=Date.now||function(){return(new Date).getTime()},$={bodyOffset:function(){return document.body.offsetHeight+ve("marginTop")+ve("marginBottom")},offset:function(){return $.bodyOffset()},bodyScroll:function(){return document.body.scrollHeight},custom:function(){return W.height()},documentElementOffset:function(){return document.documentElement.offsetHeight},documentElementScroll:function(){return document.documentElement.scrollHeight},max:function(){return Math.max.apply(null,we($))},min:function(){return Math.min.apply(null,we($))},grow:function(){return $.max()},lowestElement:function(){return Math.max($.bodyOffset()||$.documentElementOffset(),ye("bottom",Te()))},taggedElement:function(){return be("bottom","data-iframe-height")}},_={bodyScroll:function(){return document.body.scrollWidth},bodyOffset:function(){return document.body.offsetWidth},custom:function(){return W.width()},documentElementScroll:function(){return document.documentElement.scrollWidth},documentElementOffset:function(){return document.documentElement.offsetWidth},scroll:function(){return Math.max(_.bodyScroll(),_.documentElementScroll())},max:function(){return Math.max.apply(null,we(_))},min:function(){return Math.min.apply(null,we(_))},rightMostElement:function(){return ye("right",Te())},taggedElement:function(){return be("right","data-iframe-width")}},ee=(V=Ee,Q=null,G=0,function(){var e=Z(),t=x-(e-(G=G||e));return X=this,Y=arguments,t<=0||x<t?(Q&&(clearTimeout(Q),Q=null),G=e,K=V.apply(X,Y),Q||(X=Y=null)):Q=Q||setTimeout(Oe,t),K});ne(window,"message",function(t){var n={init:function(){v=t.data,C=t.source,ue(),f=!1,setTimeout(function(){p=!1},l)},reset:function(){p?re("Page reset ignored by init"):(re("Page size reset by host page"),Ie("resetPage"))},resize:function(){Se("resizeParent","Parent window requested size check")},moveToAnchor:function(){y.findTarget(i())},inPageLink:function(){this.moveToAnchor()},pageInfo:function(){var e=i();re("PageInfoFromParent called from parent: "+e),H(JSON.parse(e)),re(" --")},message:function(){var e=i();re("onMessage called from parent: "+e),j(JSON.parse(e)),re(" --")}};function o(){return t.data.split("]")[1].split(":")[0]}function i(){return t.data.substr(t.data.indexOf(":")+1)}function r(){return t.data.split(":")[2]in{true:1,false:1}}function e(){var e=o();e in n?n[e]():("undefined"==typeof module||!module.exports)&&"iFrameResize"in window||"jQuery"in window&&"iFrameResize"in window.jQuery.prototype||r()||ae("Unexpected message ("+t.data+")")}O===(""+t.data).substr(0,S)&&(!1===f?e():r()?n.init():re('Ignored message of type "'+o()+'". 
Received before initialization.'))}),ne(window,"readystatechange",Ce),Ce()}function te(){}function ne(e,t,n,o){e.addEventListener(t,n,!!J&&(o||{}))}function oe(e){return e.charAt(0).toUpperCase()+e.slice(1)}function ie(e){return O+"["+M+"] "+e}function re(e){T&&"object"==typeof window.console&&console.log(ie(e))}function ae(e){"object"==typeof window.console&&console.warn(ie(e))}function ue(){!function(){function e(e){return"true"===e}var t=v.substr(S).split(":");M=t[0],r=u!==t[1]?Number(t[1]):r,s=u!==t[2]?e(t[2]):s,T=u!==t[3]?e(t[3]):T,w=u!==t[4]?Number(t[4]):w,n=u!==t[6]?e(t[6]):n,a=t[7],g=u!==t[8]?t[8]:g,i=t[9],c=t[10],k=u!==t[11]?Number(t[11]):k,y.enable=u!==t[12]&&e(t[12]),N=u!==t[13]?t[13]:N,P=u!==t[14]?t[14]:P,E=u!==t[15]?Boolean(t[15]):E}(),re("Initialising iFrame ("+window.location.href+")"),function(){function e(e,t){return"function"==typeof e&&(re("Setup custom "+t+"CalcMethod"),W[t]=e,e="custom"),e}"iFrameResizer"in window&&Object===window.iFrameResizer.constructor&&(function(){var e=window.iFrameResizer;re("Reading data from page: "+JSON.stringify(e)),Object.keys(e).forEach(ce,e),j="onMessage"in e?e.onMessage:j,q="onReady"in e?e.onReady:q,z="targetOrigin"in e?e.targetOrigin:z,g="heightCalculationMethod"in e?e.heightCalculationMethod:g,P="widthCalculationMethod"in e?e.widthCalculationMethod:P}(),g=e(g,"height"),P=e(P,"width"));re("TargetOrigin for parent set to: "+z)}(),function(){u===a&&(a=r+"px");se("margin",function(e,t){-1!==t.indexOf("-")&&(ae("Negative CSS value ignored for "+e),t="");return t}("margin",a))}(),se("background",i),se("padding",c),function(){var e=document.createElement("div");e.style.clear="both",e.style.display="block",e.style.height="0",document.body.appendChild(e)}(),me(),he(),document.documentElement.style.height="",document.body.style.height="",re('HTML & body height set to "auto"'),re("Enable public methods"),D.parentIFrame={autoResize:function(e){return!0===e&&!1===n?(n=!0,ge()):!1===e&&!0===n&&(n=!1,le("remove"),null!==t&&t.disconnect(),clearInterval(b)),Ae(0,0,"autoResize",JSON.stringify(n)),n},close:function(){Ae(0,0,"close")},getId:function(){return M},getPageInfo:function(e){"function"==typeof e?(H=e,Ae(0,0,"pageInfo")):(H=function(){},Ae(0,0,"pageInfoStop"))},moveToAnchor:function(e){y.findTarget(e)},reset:function(){Ne("parentIFrame.reset")},scrollTo:function(e,t){Ae(t,e,"scrollTo")},scrollToOffset:function(e,t){Ae(t,e,"scrollToOffset")},sendMessage:function(e,t){Ae(0,0,"message",JSON.stringify(e),t)},setHeightCalculationMethod:function(e){g=e,me()},setWidthCalculationMethod:function(e){P=e,he()},setTargetOrigin:function(e){re("Set targetOrigin: "+e),z=e},size:function(e,t){Se("size","parentIFrame.size("+((e||"")+(t?","+t:""))+")",e,t)}},function(){if(!0!==E)return;function n(e){Ae(0,0,e.type,e.screenY+":"+e.screenX)}function e(e,t){re("Add event listener: "+t),ne(window.document,e,n)}e("mouseenter","Mouse Enter"),e("mouseleave","Mouse Leave")}(),ge(),y=function(){function r(e){var t=e.getBoundingClientRect(),n={x:window.pageXOffset!==u?window.pageXOffset:document.documentElement.scrollLeft,y:window.pageYOffset!==u?window.pageYOffset:document.documentElement.scrollTop};return{x:parseInt(t.left,10)+parseInt(n.x,10),y:parseInt(t.top,10)+parseInt(n.y,10)}}function n(e){var t,n=e.split("#")[1]||e,o=decodeURIComponent(n),i=document.getElementById(o)||document.getElementsByName(o)[0];u!==i?(t=r(i),re("Moving to in page link (#"+n+") at x: "+t.x+" y: "+t.y),Ae(t.y,t.x,"scrollToOffset")):(re("In page link (#"+n+") not found in iFrame, so sending to 
parent"),Ae(0,0,"inPageLink","#"+n))}function e(){var e=window.location.hash,t=window.location.href;""!==e&&"#"!==e&&n(t)}function t(){Array.prototype.forEach.call(document.querySelectorAll('a[href^="#"]'),function(e){"#"!==e.getAttribute("href")&&ne(e,"click",function(e){e.preventDefault(),n(this.getAttribute("href"))})})}y.enable?Array.prototype.forEach&&document.querySelectorAll?(re("Setting up location.hash handlers"),t(),ne(window,"hashchange",e),setTimeout(e,l)):ae("In page linking not fully supported in this browser! (See README.md for IE8 workaround)"):re("In page linking not enabled");return{findTarget:n}}(),Se("init","Init message from host page"),q()}function ce(e){var t=e.split("Callback");if(2===t.length){var n="on"+t[0].charAt(0).toUpperCase()+t[0].slice(1);this[n]=this[e],delete this[e],ae("Deprecated: '"+e+"' has been renamed '"+n+"'. The old method will be removed in the next major version.")}}function se(e,t){u!==t&&""!==t&&"null"!==t&&re("Body "+e+' set to "'+(document.body.style[e]=t)+'"')}function de(n){var e={add:function(e){function t(){Se(n.eventName,n.eventType)}B[e]=t,ne(window,e,t,{passive:!0})},remove:function(e){var t=B[e];delete B[e],function(e,t,n){e.removeEventListener(t,n,!1)}(window,e,t)}};n.eventNames&&Array.prototype.map?(n.eventName=n.eventNames[0],n.eventNames.map(e[n.method])):e[n.method](n.eventName),re(oe(n.method)+" event listener: "+n.eventType)}function le(e){de({method:e,eventType:"Animation Start",eventNames:["animationstart","webkitAnimationStart"]}),de({method:e,eventType:"Animation Iteration",eventNames:["animationiteration","webkitAnimationIteration"]}),de({method:e,eventType:"Animation End",eventNames:["animationend","webkitAnimationEnd"]}),de({method:e,eventType:"Input",eventName:"input"}),de({method:e,eventType:"Mouse Up",eventName:"mouseup"}),de({method:e,eventType:"Mouse Down",eventName:"mousedown"}),de({method:e,eventType:"Orientation Change",eventName:"orientationchange"}),de({method:e,eventType:"Print",eventName:["afterprint","beforeprint"]}),de({method:e,eventType:"Ready State Change",eventName:"readystatechange"}),de({method:e,eventType:"Touch Start",eventName:"touchstart"}),de({method:e,eventType:"Touch End",eventName:"touchend"}),de({method:e,eventType:"Touch Cancel",eventName:"touchcancel"}),de({method:e,eventType:"Transition Start",eventNames:["transitionstart","webkitTransitionStart","MSTransitionStart","oTransitionStart","otransitionstart"]}),de({method:e,eventType:"Transition Iteration",eventNames:["transitioniteration","webkitTransitionIteration","MSTransitionIteration","oTransitionIteration","otransitioniteration"]}),de({method:e,eventType:"Transition End",eventNames:["transitionend","webkitTransitionEnd","MSTransitionEnd","oTransitionEnd","otransitionend"]}),"child"===N&&de({method:e,eventType:"IFrame Resized",eventName:"resize"})}function fe(e,t,n,o){return t!==e&&(e in n||(ae(e+" is not a valid option for "+o+"CalculationMethod."),e=t),re(o+' calculation method set to "'+e+'"')),e}function me(){g=fe(g,h,$,"height")}function he(){P=fe(P,F,_,"width")}function ge(){!0===n?(le("add"),function(){var e=w<0;window.MutationObserver||window.WebKitMutationObserver?e?pe():t=function(){function t(e){function t(e){!1===e.complete&&(re("Attach listeners to "+e.src),e.addEventListener("load",i,!1),e.addEventListener("error",r,!1),u.push(e))}"attributes"===e.type&&"src"===e.attributeName?t(e.target):"childList"===e.type&&Array.prototype.forEach.call(e.target.querySelectorAll("img"),t)}function o(e){re("Remove listeners from 
"+e.src),e.removeEventListener("load",i,!1),e.removeEventListener("error",r,!1),function(e){u.splice(u.indexOf(e),1)}(e)}function n(e,t,n){o(e.target),Se(t,n+": "+e.target.src)}function i(e){n(e,"imageLoad","Image loaded")}function r(e){n(e,"imageLoadFailed","Image load failed")}function a(e){Se("mutationObserver","mutationObserver: "+e[0].target+" "+e[0].type),e.forEach(t)}var u=[],c=window.MutationObserver||window.WebKitMutationObserver,s=function(){var e=document.querySelector("body");return s=new c(a),re("Create body MutationObserver"),s.observe(e,{attributes:!0,attributeOldValue:!1,characterData:!0,characterDataOldValue:!1,childList:!0,subtree:!0}),s}();return{disconnect:function(){"disconnect"in s&&(re("Disconnect body MutationObserver"),s.disconnect(),u.forEach(o))}}}():(re("MutationObserver not supported in this browser!"),pe())}()):re("Auto Resize disabled")}function pe(){0!==w&&(re("setInterval: "+w+"ms"),b=setInterval(function(){Se("interval","setInterval: "+w)},Math.abs(w)))}function ve(e,t){var n=0;return t=t||document.body,n=null!==(n=document.defaultView.getComputedStyle(t,null))?n[e]:0,parseInt(n,o)}function ye(e,t){for(var n=t.length,o=0,i=0,r=oe(e),a=Z(),u=0;u<n;u++)i<(o=t[u].getBoundingClientRect()[e]+ve("margin"+r,t[u]))&&(i=o);return a=Z()-a,re("Parsed "+n+" HTML elements"),re("Element position calculated in "+a+"ms"),function(e){x/2<e&&re("Event throttle increased to "+(x=2*e)+"ms")}(a),i}function we(e){return[e.bodyOffset(),e.bodyScroll(),e.documentElementOffset(),e.documentElementScroll()]}function be(e,t){var n=document.querySelectorAll("["+t+"]");return 0===n.length&&(ae("No tagged elements ("+t+") found on page"),document.querySelectorAll("body *")),ye(e,n)}function Te(){return document.querySelectorAll("body *")}function Ee(e,t,n,o){var i,r;function a(e,t){return!(Math.abs(e-t)<=k)}i=u!==n?n:$[g](),r=u!==o?o:_[P](),a(m,i)||s&&a(L,r)||"init"===e?(Me(),Ae(m=i,L=r,e)):e in{init:1,interval:1,size:1}||!(g in I||s&&P in I)?e in{interval:1}||re("No change in size detected"):Ne(t)}function Oe(){G=Z(),Q=null,K=V.apply(X,Y),Q||(X=Y=null)}function Se(e,t,n,o){R&&e in d?re("Trigger event cancelled: "+e):(e in{reset:1,resetPage:1,init:1}||re("Trigger event: "+t),"init"===e?Ee(e,t,n,o):ee(e,t,n,o))}function Me(){R||(R=!0,re("Trigger event lock on")),clearTimeout(e),e=setTimeout(function(){R=!1,re("Trigger event lock off"),re("--")},l)}function Ie(e){m=$[g](),L=_[P](),Ae(m,L,e)}function Ne(e){var t=g;g=h,re("Reset trigger event: "+e),Me(),Ie("reset"),g=t}function Ae(e,t,n,o,i){var r;!0===A&&(u===i?i=z:re("Message targetOrigin: "+i),re("Sending message to host page ("+(r=M+":"+(e+":"+t)+":"+n+(u!==o?":"+o:""))+")"),C.postMessage(O+r,i))}function Ce(){"loading"!==document.readyState&&window.parent.postMessage("[iFrameResizerChild]Ready","*")}}();
| 153957-theme | /153957-theme-2.tar.gz/153957-theme-2/153957_theme/static/js/iframeResizer.contentWindow.min.js | iframeResizer.contentWindow.min.js |