# BIO tagging scheme for the land-auction NER task: the 'O' (outside) tag
# plus a B-/I- pair for each entity type (parcel code, location, area, ...).
_ENTITY_TYPES = ['地块编码', '地块位置', '出让面积', '土地用途', '容积率',
                 '起始价', '成交价', '溢价率', '成交时间', '受让人',
                 '城市', '触发词']
schemas = ['O'] + [f'{prefix}-{entity}' for entity in _ENTITY_TYPES
                   for prefix in ('B', 'I')]
# Bidirectional lookup between tag strings and integer class ids.
id2tag = dict(enumerate(schemas))
tag2id = {tag: idx for idx, tag in id2tag.items()}

#id2tag,tag2id

from datasets import ClassLabel
from transformers import BertForTokenClassification, AutoTokenizer

import torch
import pandas as pd


# Load a fine-tuned Chinese land-NER model and its matching tokenizer from
# the Hugging Face Hub (downloads on first run; requires network access).
modelpre = BertForTokenClassification.from_pretrained("xyfigo/MyLandNER")
tokenizerpre = AutoTokenizer.from_pretrained("xyfigo/MyLandNER")

# ClassLabel feature mapping tag names <-> integer ids for the datasets library.
tags = ClassLabel(num_classes=len(schemas), names=schemas)

def tag_text(text, tags, model, tokenizer):
    """Tag *text* with a token-classification model and return the result.

    Args:
        text: Raw input string to tag.
        tags: ClassLabel mapping prediction ids to tag names.
        model: Token-classification model producing per-token logits.
        tokenizer: Tokenizer matching *model*.

    Returns:
        pandas.DataFrame with rows "Tokens" and "Tags", one column per
        token (special tokens such as [CLS]/[SEP] included).
    """
    # Get tokens with special characters, for display next to predictions.
    tokens = tokenizer(text).tokens()
    # Encode the sequence into IDs.
    # Fix: use the `tokenizer` argument, not the module-level `tokenizerpre`;
    # the original silently ignored whichever tokenizer the caller passed in.
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    # Logits shape: [batch, seq_len, len(tags.names)].
    outputs = model(input_ids)[0]
    # Take argmax to get the most likely class per token.
    predictions = torch.argmax(outputs, dim=2)
    preds = [tags.names[p] for p in predictions[0].cpu().numpy()]
    return pd.DataFrame([tokens, preds], index=["Tokens", "Tags"])

# Sample land-auction announcement (Chinese real-estate news) used as a
# quick smoke test for tag_text above. Runtime data — do not edit.
text = """
中国网地产讯 7月28日，上海2022年第二轮集中供地第四天，共出让5宗地块，总起始价约120.1亿元。
福建兆润房地产有限公司&上海铧发创盛置业有限公司（厦门建发&华发）通过一次性报价，以总价9.65亿元竞得上海闵行区1总地块，楼面价40875元/㎡，溢价率 8.00%。
地块公告号202205413，地块名称闵行区浦锦街道MHP0-1302单元15-01地块，地块范围为东至：用地红线，西至：浦秀路，南至：江桦路，北至：南江榉路，出让土地面积19673.6平方米，土地用途为居住用地，总起始价约8.94亿元，楼面起始价37846.83元/平方米。
据土地出让文件，该地块规划容积率为1.2，建筑面积23608.32平方米。
"""
#pd.set_option('display.max_columns', None) # 展示所有列

#tag_text(text, tags, modelpre, tokenizerpre)


# Read the raw training data and normalize it into the standard format
import json
import numpy, pandas as pd
from datasets import Dataset


def processLabelInALine(line):
    """Parse one JSON-lines annotation record into token/BIO-id form.

    Args:
        line: One line of the annotation file; a JSON object with keys
              "id", "data" (the raw text) and "label" (a list of
              [start, stop, tag] character spans, stop exclusive).

    Returns:
        dict with "id", "tokens" (one entry per character) and "ner_tags"
        (integer BIO tag ids of the same length), or None for blank lines.
    """
    line = line.strip()
    if not line:
        # Blank lines carry no record; callers filter the None out.
        return None
    linejson = json.loads(line)
    # Fix: tag ids must be integers. numpy.zeros() defaults to float64,
    # which would propagate float labels downstream and break
    # torch.nn.functional.cross_entropy (it requires integer targets).
    nertag = numpy.zeros(len(linejson["data"]), dtype=numpy.int64)
    # Character-level tokens: this Chinese NER data is tagged per character.
    tokens = list(linejson["data"])
    for start, stop, tagstr in linejson["label"]:
        if tagstr == "NN":
            # "NN" marks a non-entity span -> 'O' (id 0).
            nertag[start:stop] = 0
        else:
            # BIO scheme: B- on the first character, I- on the rest.
            nertag[start] = tag2id["B-" + tagstr]
            nertag[start + 1:stop] = tag2id["I-" + tagstr]
    return {"id": linejson["id"], "tokens": tokens, "ner_tags": nertag}


def processFile(filename):
    """Read a JSON-lines annotation file and build a datasets.Dataset.

    Args:
        filename: Path to a UTF-8 JSON-lines file, one record per line.

    Returns:
        datasets.Dataset with columns "id", "tokens" and "ner_tags".
    """
    # Fix: the original never closed the file handle; `with` guarantees
    # closure even if a line fails to parse.
    with open(filename, 'r', encoding='utf8') as f:
        processed = [processLabelInALine(line) for line in f]
    # Drop the None entries produced by blank lines.
    df = pd.DataFrame([record for record in processed if record is not None])
    return Dataset.from_pandas(df, split="train")


#预处理数据，label转化为规范格式。
from datasets import DatasetDict

# Build the train/valid splits and collect them into a single DatasetDict.
land_train, land_valid = processFile("train.json"), processFile("valid.json")
dataset = DatasetDict({"train": land_train, "valid": land_valid})

from datasets import load_dataset, load_metric
from transformers import TrainingArguments, Trainer, DataCollatorForTokenClassification
import numpy as np
# Define the data collator (dynamic padding for token-classification
# batches) and use seqeval for entity-level evaluation.
data_collator = DataCollatorForTokenClassification(tokenizerpre)
# NOTE(review): datasets.load_metric is deprecated in newer releases; the
# replacement is `evaluate.load("seqeval")` — confirm the installed version.
metric = load_metric("seqeval")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


from transformers import AutoTokenizer
# Load the tokenizer that matches the base model. (AutoTokenizer was already
# imported above; this re-import is redundant but harmless.)
bert_model_name = "bert-base-chinese"
bert_tokenizer = AutoTokenizer.from_pretrained(bert_model_name)
def tokenize_and_align_labels(examples):
    """Tokenize pre-split tokens and align NER labels to sub-tokens.

    Args:
        examples: Batch dict with "tokens" (list of token lists) and
                  "ner_tags" (matching lists of BIO tag ids).

    Returns:
        The tokenizer output with an added "labels" key: one label per
        sub-token, -100 for special tokens ([CLS]/[SEP]) so the loss
        function ignores them. Every sub-token of a word receives that
        word's label (not only the first sub-token), matching the
        original behaviour.
    """
    tokenized_inputs = bert_tokenizer(examples["tokens"], truncation=True,
                                      is_split_into_words=True)
    labels = []
    for idx, label in enumerate(examples["ner_tags"]):
        # word_ids() maps each sub-token back to its source word index
        # (None for special tokens).
        word_ids = tokenized_inputs.word_ids(batch_index=idx)
        # Fix: the original's elif/else branches were byte-identical
        # (first sub-token and continuation both copied the word label),
        # so the previous_word_idx bookkeeping was dead code; collapse it.
        # int() guards against float tag ids leaking in from upstream.
        label_ids = [-100 if word_idx is None else int(label[word_idx])
                     for word_idx in word_ids]
        labels.append(label_ids)
    tokenized_inputs["labels"] = labels
    return tokenized_inputs

# Align labels over both splits; drop the raw columns so the data collator
# only ever sees model inputs (input_ids, attention_mask, labels, ...).
tokenized_datasets = dataset.map(tokenize_and_align_labels, batched=True, load_from_cache_file=False, remove_columns=['id', 'ner_tags', 'tokens'])

from torch.nn.functional import cross_entropy

def forward_pass_with_label(batch):
    """Run the pretrained model over one padded batch and record, per
    sequence, the per-token cross-entropy loss and predicted label ids.

    Intended for Dataset.map(batched=True): *batch* arrives as a dict of
    columns and the returned dict adds "loss" and "predicted_label".
    """
    # The collator wants a list of per-example dicts, but map() hands us
    # a dict of columns — transpose it first.
    examples = [dict(zip(batch, row)) for row in zip(*batch.values())]
    # Pad the examples and move every tensor onto the target device.
    padded = data_collator(examples)
    input_ids = padded["input_ids"].to(device)
    attention_mask = padded["attention_mask"].to(device)
    labels = padded["labels"].to(device)
    # Inference only — no gradients required.
    with torch.no_grad():
        output = modelpre(input_ids, attention_mask)
        # logits: [batch_size, seq_len, num_classes]; argmax over classes.
        predicted_label = torch.argmax(output.logits, axis=-1).cpu().numpy()
    # Per-token loss: flatten to [batch*seq, classes] for cross_entropy,
    # then restore the batch dimension.
    flat_loss = cross_entropy(output.logits.view(-1, len(schemas)),
                              labels.view(-1), reduction="none")
    loss = flat_loss.view(len(input_ids), -1).cpu().numpy()
    return {"loss": loss, "predicted_label": predicted_label}

# Score every validation example and pull the results into pandas for
# error analysis (e.g. inspecting the highest-loss tokens/sequences).
valid_set = tokenized_datasets["valid"]
valid_set = valid_set.map(forward_pass_with_label, batched=True, batch_size=8)
df = valid_set.to_pandas()
