This checkpoint is a states-tuning file for RWKV-6-7B. Please download the base model from https://huggingface.co/BlinkDL/rwkv-6-world/tree/main . It extracts triples according to the given input and schema. Usage:
Update to the latest rwkv package: pip install --upgrade rwkv
Download the base model and the states file. You may download the states from the epoch_2 directory.
Use the following code:
- Loading the model and states
# --- Load the base RWKV model and the tuned per-layer states ---
from rwkv.model import RWKV
from rwkv.utils import PIPELINE, PIPELINE_ARGS
import torch

# download models: https://huggingface.co/BlinkDL
model = RWKV(model='/media/yueyulin/KINGSTON/models/rwkv6/RWKV-x060-World-7B-v2.1-20240507-ctx4096.pth', strategy='cuda fp16')
print(model.args)
pipeline = PIPELINE(model, "rwkv_vocab_v20230424")  # 20B_tokenizer.json is in https://github.com/BlinkDL/ChatRWKV
# use pipeline = PIPELINE(model, "rwkv_vocab_v20230424") for rwkv "world" models

states_file = '/media/yueyulin/data_4t/models/states_tuning/custom_trainer/epoch_2/RWKV-x060-World-7B-v2.1-20240507-ctx4096.pth.pth'
# map_location='cpu' makes loading independent of CUDA initialization;
# the tensors are moved to the target device explicitly below.
states = torch.load(states_file, map_location='cpu')

states_value = []
device = 'cuda'
n_head = model.args.n_head                           # informative only — not used below
head_size = model.args.n_embd // model.args.n_head   # informative only — not used below
for i in range(model.args.n_layer):
    # Three entries are appended per layer; presumably the rwkv runtime expects
    # [att shift, time-mix state, ffn shift] per layer — confirm against the rwkv package.
    key = f'blocks.{i}.att.time_state'
    value = states[key]
    prev_x = torch.zeros(model.args.n_embd, device=device, dtype=torch.float16)
    prev_states = value.clone().detach().to(device=device, dtype=torch.float16).transpose(1, 2)
    prev_ffn = torch.zeros(model.args.n_embd, device=device, dtype=torch.float16)
    states_value.append(prev_x)
    states_value.append(prev_states)
    states_value.append(prev_ffn)
The full schema consists of 12 schema types taken from Wiki:
# Schema definitions for the 12 Wiki entity categories.
# For each category the value is a two-element list:
#   [0] full triple templates, formatted "head_type_relation_tail_type";
#   [1] the bare attribute (relation) names that appear in prompts.
# NOTE(review): some categories reuse 产品/生物-prefixed templates (e.g. 自然科学,
# 人造物件) and a few tail types look off (e.g. 组织_成立时间_组织) — these strings
# are runtime data, so they are kept byte-identical; confirm against the training data.
whole_schema = {
"事件": [
[
"事件_参与者_人物/组织",
"事件_发生地点_地理地区",
"事件_发生时间_时间",
"事件_别名_事件",
"事件_赞助者_人物/组织",
"事件_伤亡人数_度量",
"事件_起因_文本",
"事件_导致_文本",
"事件_主办方_组织",
"事件_所获奖项_专业",
"事件_获胜者_人物/组织"
],
[
"参与者",
"发生地点",
"发生时间",
"别名",
"赞助者",
"伤亡人数",
"起因",
"导致",
"主办方",
"所获奖项",
"获胜者"
]
],
"自然科学": [
[
"产品_别名_产品",
"产品_组成_产品",
"产品_生成物_产品",
"产品_产地_地理地区",
"产品_发现者或发明者_人物/组织",
"产品_名称由来_文本",
"产品_用途_文本"
],
[
"别名",
"组成",
"生成物",
"产地",
"发现者或发明者",
"名称由来",
"用途"
]
],
"建筑结构": [
[
"建筑结构/地理地区_位于_地理地区",
"建筑结构_别名_建筑结构",
"建筑结构_创建时间_时间",
"建筑结构_宽度_度量",
"建筑结构_长度_度量",
"建筑结构_创建者_人物/组织",
"建筑结构_高度_度量",
"建筑结构_面积_度量",
"建筑结构_成就_专业",
"建筑结构_名称由来_文本"
],
[
"位于",
"别名",
"创建时间",
"宽度",
"长度",
"创建者",
"高度",
"面积",
"成就",
"名称由来"
]
],
"地理地区": [
[
"地理地区_位于_地理地区",
"地理地区_别名_地理地区",
"地理地区_人口_度量",
"地理地区_行政中心_地理地区",
"地理地区_面积_度量",
"地理地区_长度_度量",
"地理地区_宽度_度量",
"地理地区_海拔_度量"
],
[
"位于",
"别名",
"人口",
"行政中心",
"面积",
"长度",
"宽度",
"海拔"
]
],
"组织": [
[
"组织/地理地区_位于_地理地区",
"组织_别名_组织",
"组织_子组织_组织",
"组织_成立时间_组织",
"组织_产品_组织",
"组织_成员_组织",
"组织_创办者_组织",
"组织_解散时间_组织"
],
[
"位于",
"别名",
"子组织",
"成立时间",
"产品",
"成员",
"创办者",
"解散时间"
]
],
"医学": [
[
"医学_症状_医学",
"医学_别名_医学",
"医学_发病部位_文本",
"医学_可能后果_文本",
"医学_病因_文本",
"医学_用药_医学",
"医学_疗法_医学",
"医学_传播方式_文本"
],
[
"症状",
"别名",
"发病部位",
"可能后果",
"病因",
"用药",
"疗法",
"传播方式"
]
],
"天文对象": [
[
"天文对象类型_别名_天文对象类型",
"天文对象类型_属于_天文对象类型",
"天文对象类型_发现时间_时间",
"天文对象类型_发现者或发明者_人物/组织",
"天文对象类型_名称由来_文本",
"天文对象类型_绝对星等_度量",
"天文对象类型_直径_度量",
"天文对象类型_质量_度量"
],
[
"别名",
"属于",
"发现时间",
"发现者或发明者",
"名称由来",
"绝对星等",
"直径",
"质量"
]
],
"人造物件": [
[
"产品_别名_产品",
"生物_长度_度量",
"生物_宽度_度量",
"产品_品牌_组织",
"产品_材料_产品",
"产品_产地_地理地区",
"产品_制造商_组织",
"产品_发现者或发明者_人物/组织"
],
[
"别名",
"长度",
"宽度",
"品牌",
"材料",
"产地",
"制造商",
"发现者或发明者"
]
],
"运输": [
[
"运输/地理地区_位于_地理地区",
"运输_成立或创建时间_时间",
"运输_线路_运输",
"运输_开通时间_时间",
"运输_途经_地理地区",
"运输_面积_度量",
"运输_别名_运输",
"运输_长度_度量",
"运输_宽度_度量",
"运输_车站等级_度量"
],
[
"位于",
"成立或创建时间",
"线路",
"开通时间",
"途经",
"面积",
"别名",
"长度",
"宽度",
"车站等级"
]
],
"作品": [
[
"产品_作者_人物",
"产品_出版时间_时间",
"产品_别名_产品",
"产品_产地_地理地区",
"产品_改编自_产品",
"产品_演员_人物/组织",
"产品_出版商_组织",
"产品_成就_专业",
"产品_表演者_人物/组织",
"产品_导演_人物/组织",
"产品_制片人_人物/组织",
"产品_编剧_人物/组织",
"产品_曲目_产品",
"产品_作曲者_人物/组织",
"产品_开发者_人物/组织",
"产品_作词者_人物/组织",
"产品_制作商_组织",
"产品_票房_度量",
"产品_时长_度量",
"产品_平台_组织"
],
[
"作者",
"出版时间",
"别名",
"产地",
"改编自",
"演员",
"出版商",
"成就",
"表演者",
"导演",
"制片人",
"编剧",
"曲目",
"作曲者",
"开发者",
"作词者",
"制作商",
"票房",
"时长",
"平台"
]
],
"生物": [
[
"生物_分布_地理地区",
"生物_父级分类单元_地理地区",
"生物_长度_度量",
"生物_别名_生物",
"生物_学名_生物",
"生物_重量_度量",
"生物_宽度_度量",
"生物_高度_度量",
"生物_主要食物来源_文本"
],
[
"分布",
"父级分类单元",
"长度",
"别名",
"学名",
"重量",
"宽度",
"高度",
"主要食物来源"
]
],
"人物": [
[
"人物_别名_人物",
"人物_出生地点_地理地区",
"人物_出生日期_时间",
"人物_死亡地点_地理地区",
"人物_国籍_地理地区",
"人物_职业_专业",
"人物_作品_产品",
"人物_成就_专业",
"人物_籍贯_地理地区",
"人物_职务_专业",
"人物_配偶_人物",
"人物_父母_人物",
"人物_所属组织_组织",
"人物_死亡日期_时间",
"人物_兄弟姊妹_人物"
],
[
"别名",
"出生地点",
"出生日期",
"死亡地点",
"国籍",
"职业",
"作品",
"成就",
"籍贯",
"职务",
"配偶",
"父母",
"所属组织",
"死亡日期",
"兄弟姊妹"
]
]
}
# Build the prompt-facing schema dict: one {'entity_type', 'attributes'} entry per
# category, using only the bare attribute names (schema[1]), e.g.:
#   "schema": [{"entity_type": "人物", "attributes": ["所属组织", "出生日期", "职务", "父母", "籍贯", "死亡地点", "兄弟姊妹", "出生地点", "职业", "死亡日期", "作品", "别名", "成就", "配偶", "国籍"]}]
schemas = {
    cate: {'entity_type': cate, 'attributes': schema[1]}
    for cate, schema in whole_schema.items()
}
- Try the following examples:
# --- Example 1: extract 人物 (person) triples ---
import json

cat_char = '🐱'
bot_char = '🤖'
instruction = '你是一个图谱实体知识结构化专家。请从input中抽取出符合schema定义的实体实例和其属性,不存在的属性不输出,属性存在多值就返回列表。请按照JSON字符串的格式回答。'
schema = schemas['人物']
input_text = "个人简介姓名:拉塞·维比 所属球队:布伦特福德 国籍:丹麦、法国、荷兰、法属圭亚那 出生日期:1987-02-22 身高:181cm 体重:73kg 场上位置:前锋 球衣号码:21 丹麦射手拉塞-维比,获得了2014赛季瑞超联赛金靴"
input_text = {'input': input_text, 'schema': schema}
# BUG FIX: json.dumps() returns str in Python 3, which has no .decode();
# ensure_ascii=False keeps the Chinese text unescaped, matching the prompt shown below.
input_text = json.dumps(input_text, ensure_ascii=False)
ctx = f'{cat_char}:{instruction}\n{input_text}\n{bot_char}:'
print(ctx)
def my_print(s):
    """Streaming callback for pipeline.generate: echo each token chunk with no newline."""
    print(s, end='', flush=True)
# Sampling configuration. For alpha_frequency and alpha_presence, see
# "Frequency and presence penalties":
# https://platform.openai.com/docs/api-reference/parameter-details
args = PIPELINE_ARGS(
    temperature=1.0,
    top_p=0,
    top_k=0,              # top_k = 0 then ignore
    alpha_frequency=0.25,
    alpha_presence=0.25,
    alpha_decay=0.996,    # gradually decay the penalty
    token_ban=[0],        # ban the generation of some tokens
    token_stop=[0, 1],    # stop generation whenever you see any token here
    chunk_len=256,        # split input into chunks to save VRAM (shorter -> slower)
)
# Generate with the tuned per-layer states; my_print streams output as it is produced.
pipeline.generate(ctx, token_count=200, args=args, callback=my_print, state=states_value)
print('\n')
The output looks like:
🐱:你是一个图谱实体知识结构化专家。请从input中抽取出符合schema定义的实体实例和其属性,不存在的属性不输出,属性存在多值就返回列表。请按照JSON字符串的格式回答。
{"input":"个人简介姓名:拉塞·维比 所属球队:布伦特福德 国籍:丹麦、法国、荷兰、法属圭亚那 出生日期:1987-02-22 身高:181cm 体重:73kg 场上位置:前锋 球衣号码:21 丹麦射手拉塞-维比,获得了2014赛季瑞超联赛金靴","schema":{"entity_type":"人物","attributes":["别名","出生地点","出生日期","死亡地点","国籍","职业","作品","成就","籍贯","职务","配偶","父母","所属组织","死亡日期","兄弟姊妹"]}}
🤖:{"result":[{"head":"拉塞·维比","head_type":"人物","relation":"职业","tail":"前锋","tail_type":"专业"},{"head":"拉塞·维比","head_type":"人物","relation":"出生日期","tail":"1987-02-22","tail_type":"时间"},{"head":"拉塞·维比","head_type":"人物","relation":"国籍","tail":"丹麦、法国、荷兰、法属圭亚那","tail_type":"地理地区"},{"head":"拉塞·维比","head_type":"人物","relation":"成就","tail":"2014赛季瑞超联赛金靴","tail_type":"专业"},{"head":"拉塞·维比","head_type":"人物","relation":"所属组织","tail":"布伦特福德","tail_type":"组织"}]}
- Try another example:
# --- Example 2: extract 地理地区 (geographic region) triples ---
import json

schema = schemas['地理地区']
input_text = "赛尔龙乡,是中华人民共和国青海省黄南藏族自治州河南蒙古族自治县下辖的一个乡镇级行政单位。"
input_text = {'input': input_text, 'schema': schema}
# BUG FIX: json.dumps() returns str in Python 3, which has no .decode();
# ensure_ascii=False keeps the Chinese text unescaped, matching the prompt shown below.
input_text = json.dumps(input_text, ensure_ascii=False)
ctx = f'{cat_char}:{instruction}\n{input_text}\n{bot_char}:'
The output looks like:
🐱:你是一个图谱实体知识结构化专家。请从input中抽取出符合schema定义的实体实例和其属性,不存在的属性不输出,属性存在多值就返回列表。请按照JSON字符串的格式回答。
{"input":"赛尔龙乡,是中华人民共和国青海省黄南藏族自治州河南蒙古族自治县下辖的一个乡镇级行政单位。","schema":{"entity_type":"地理地区","attributes":["位于","别名","人口","行政中心","面积","长度","宽度","海拔"]}}
🤖:{"result":[{"head":"赛尔龙乡","head_type":"地理地区","relation":"位于","tail":"河南蒙古族自治县","tail_type":"地理地区"},{"head":"河南蒙古族自治县","head_type":"地理地区","relation":"位于","tail":"黄南藏族自治州","tail_type":"地理地区"},{"head":"青海省","head_type":"地理地区","relation":"位于","tail":"中华人民共和国","tail_type":"地理地区"},{"head":"黄南藏族自治州","head_type":"地理地区","relation":"位于","tail":"青海省","tail_type":"地理地区"}]}
Unable to determine this model's library. Check the
docs
.