from transformers import BertTokenizerFast, AutoModel

from torch.utils.data import Dataset,DataLoader
import torch

# Pretrained Arabic BERT checkpoint; used for both the tokenizer and the encoder.
bert_model = "asafaya/bert-base-arabic"
# bert_model = "asafaya/bert-large-arabic"
tokenizer = BertTokenizerFast.from_pretrained(bert_model,do_lower_case=True)
model = AutoModel.from_pretrained(bert_model)

# Two sample Arabic sentences, used only by the tokenizer smoke test just below.
line=["محمد الحمادي رئيس تحرير صحيفة الرؤيا - رئيس جمعية الصحفيين الإماراتية","محمد العبار أعطى نموذجًا جميلاً في تطوير أداء منظومة العمل بروح الفريق ."]

# Leftover debug calls: r, tokens and token_spans are never used afterwards.
r = tokenizer(line,padding=True)
tokens = tokenizer.convert_ids_to_tokens(r['input_ids'][0])
# NOTE(review): passing the whole 2-element list here makes encode_plus treat it
# as a (text, text_pair) pair rather than a batch — probably unintended; verify.
token_spans = tokenizer.encode_plus(line, return_offsets_mapping=True, add_special_tokens=False)["offset_mapping"]

# print(r['input_ids'])
# print(tokens)
# print(r['token_type_ids'])
# print(r['attention_mask'])

class NerDatasets(Dataset):
    """Minimal map-style Dataset over a list of {"sentence", "tags"} records."""

    def __init__(self, data):
        # Keep a reference to the record list as-is; no copying or validation.
        self.sentences = data

    def __len__(self):
        # Dataset size == number of stored records.
        return len(self.sentences)

    def __getitem__(self, index):
        # Records are returned untouched; collate_fn does all the work later.
        record = self.sentences[index]
        return record

def loadCorpus(file_path):
    """Load a labelled NER corpus located under *file_path*.

    Expects two parallel files (the path is concatenated directly, so
    callers pass a trailing slash, e.g. ``"corpus/train/"``):
      - ``sentence.txt``: one sentence per line
      - ``tags.txt``:     the matching tag sequence per line

    Returns a NerDatasets of ``{"sentence": str, "tags": str}`` records.
    Extra lines in the longer file are silently dropped (zip semantics,
    same as the original).
    """
    # `with` guarantees both handles are closed (the original leaked them).
    with open(file_path + "sentence.txt") as sent_file, \
         open(file_path + "tags.txt") as tag_file:
        sentences = [
            {"sentence": line1.strip(), "tags": line2.strip()}
            for line1, line2 in zip(sent_file, tag_file)
        ]
    return NerDatasets(sentences)
def loadCorpusForPredict(file_path):
    """Load sentences only (no tags.txt required) for prediction.

    Each character of a sentence is given a placeholder tag "0" so the
    records have the same shape as those produced by loadCorpus().

    NOTE(review): this joins with "/sentence.txt" while loadCorpus()
    appends "sentence.txt" directly; callers pass trailing-slash paths so
    both work, but the convention is inconsistent.
    """
    sentences = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(file_path + "/sentence.txt") as sent_file:
        for raw_line in sent_file:
            text = raw_line.strip()
            # One dummy "0" per character; join avoids the original's
            # quadratic string concatenation. Empty text -> empty tags,
            # exactly as before.
            sentences.append({"sentence": text, "tags": " ".join("0" * len(text))})
    return NerDatasets(sentences)
def collate_fn(examples):
    """Batch-collate for the NER DataLoader.

    Takes a list of {"sentence": str, "tags": str} records (as produced by
    loadCorpus / loadCorpusForPredict), tokenizes the sentences with the
    module-level `tokenizer`, and aligns each tag string to the resulting
    word-piece tokens.

    Returns a 6-tuple:
      (input_ids, token_type_ids, attention_mask, label_ids,
       tokens_starts, tokenspans)
    — the first five are tensors of shape (batch, padded_seq_len);
    tokenspans is a plain Python list of per-sentence (start, end)
    character offset pairs (no [CLS]/[SEP] entries).

    NOTE(review): the tag string is indexed by character offset
    (tag[token_span[i-1][0]]), so tags.txt appears to hold one tag per
    character of the sentence — confirm against the corpus format.
    """
    sentences = []
    tags = []
    for example in examples:
        sentences.append(example["sentence"])
        tags.append(example["tags"])
    # Pad to the longest sentence in the batch; hard-truncate at BERT's limit.
    batchEncoding = tokenizer(sentences,padding=True,truncation=True,max_length=512)


    input_ids = batchEncoding["input_ids"]
    token_type_ids = batchEncoding["token_type_ids"]
    attention_mask = batchEncoding["attention_mask"]

    labels=[]
    tokenspans=[]
    tokens_starts = []
    for input_id,tag,sentence in zip(input_ids,tags,sentences):
        # Character-offset span of every word-piece. add_special_tokens=False,
        # so token_span[i-1] lines up with tokens[i] of the padded encoding
        # (tokens[0] being [CLS]).
        token_span = tokenizer.encode_plus(sentence,return_offsets_mapping=True, add_special_tokens=False)["offset_mapping"]
        # print(token_span)
        tokens = tokenizer.convert_ids_to_tokens(input_id)
        # tokens_start[i] == 1 iff token i starts a new word
        # ("##" prefix marks a continuation word-piece).
        tokens_start = []
        for token in tokens:
            if token.find("##") == 0:
                tokens_start.append(0)
            else:
                tokens_start.append(1)
        tokenspans.append(token_span)
        tokens_starts.append(tokens_start)
        i=-1
        label = []
        index = 0
        tag = tag.split(' ')
        # print(tokens)
        raw_line = ""
        for token in tokens:
            i+=1
            if i==0:
                # [CLS] position gets label 0.
                label.append(0)
                continue
            if token=="[SEP]":
                # [SEP] gets label 0; everything after is padding.
                label.append(0)
                break
            # q = start character offset of this token in the raw sentence.
            q = token_span[i-1][0]
            t = int(tag[q])
            # If the previous character carries the same tag, this token
            # continues an entity: shift the label id by 4 (presumably
            # B-* -> I-* over 4 entity classes — TODO confirm label map).
            if token_span[i-1][0]>0 and tag[token_span[i-1][0]] == tag[token_span[i-1][0]-1] and int(t)>0:
                t = int(t)+4
                # if t>4 :
                    # pass
                    # assert(label[-1] == t-4 or label[-1] == t)
            label.append(t)
            # index += len(token)+1
            # if token.find("##")==0:
            #     index -=2
        # Padding positions get label -1 (expected to be ignored downstream).
        label += [-1]*(len(tokens)-len(label))
        labels.append(label)
        # index-=1
        # print(tokens)
        # print (label)
    input_ids = torch.tensor(input_ids)
    token_type_ids = torch.tensor(token_type_ids)
    attention_mask = torch.tensor(attention_mask)
    label_ids = torch.tensor(labels)
    tokens_starts = torch.tensor(tokens_starts)
    return (input_ids,token_type_ids,attention_mask,label_ids,tokens_starts,tokenspans)
    # return (input_ids,token_type_ids,attention_mask,label_ids)
    # return (input_ids,token_type_ids,attention_mask,label_ids)
# NOTE: mid-file imports — kept here to preserve the original script's
# execution order (the module-level tokenizer setup above runs first).
from ModelForNer import modelForNer
import os
from torch.optim import AdamW


class args_class:
    """Plain attribute bag holding training hyper-parameters."""
    pass


# Hyper-parameters (mirrors an argparse-style namespace).
args = args_class()
args.learning_rate = 1e-6
args.adam_epsilon = 1e-6        # epsilon passed to AdamW
args.num_train_epochs = 12
args.max_seq_length = 512

# Only this assignment takes effect: the original also set "1,2,3" first,
# which was immediately overwritten and therefore dead code. (A no-op
# module-level `global args` statement was removed as well.)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda")

def predict_ner():
    """Run the trained NER model over corpus/test/ and dump results.

    Writes four parallel files under output/ (one line per sentence):
      predict.txt     predicted tag ids from the CRF decode
      token.txt       word-piece tokens ([CLS]/[SEP] stripped)
      gold.txt        gold label ids from tags.txt
      token_span.txt  (start, end) character span of each token

    Loads weights from parameter/parameter.pkl. batch_size is fixed at 1 —
    the squeeze()-based unbatching below depends on that.

    NOTE(review): the four output files are never closed or flushed
    explicitly; they are only released when the process exits.
    """
    # dataset = loadCorpusForPredict("corpus/news/")
    dataset = loadCorpus("corpus/test/")
    dataloader = DataLoader(dataset,batch_size=1,collate_fn=collate_fn,shuffle=False)
    model = modelForNer(bert_model)
    model.load_state_dict(torch.load("parameter/parameter.pkl"))
    model.to(device)
    model.eval()
    fp = open("output/predict.txt","w")
    ft = open("output/token.txt","w")
    fg = open("output/gold.txt","w")
    f_span = open("output/token_span.txt","w")
    # id2label = { i:label for i, label in enumerate(getNerLabels())}
    for step,batch in enumerate(dataloader):
        # Progress marker every 100 sentences.
        if step%100==0:
            print(step)
        # print()
        # batch[-1] (tokenspans) is a plain list and stays on the CPU.
        input_ids,token_type_ids,attention_mask,label_ids,tokens_starts = [x.to(device) for x in batch[:-1]]
        # input_ids,token_type_ids,attention_mask = [x.to(device) for x in batch[:-2]]
        if input_ids.shape[1]>=512:
            # Sentence hit the truncation limit — flag it for inspection.
            print("length:"+str(step))
        # labels=None -> the model returns emission logits instead of a loss.
        logits = model(input_ids,attention_mask,token_type_ids,None,tokens_starts)
        tags = model.crf.decode(logits,attention_mask.cuda())
        tags = tags.squeeze()
        # attention_mask = torch.squeeze(attention_mask)
        # attention_mask = attention_mask.cpu().numpy().tolist()
        # indicies = attention_mask.index(0)
        # tags = tags[1:indicies-1]
        # [1:-1] drops the [CLS] and [SEP] positions.
        tags = tags.cpu().numpy().tolist()[1:-1]
        label_ids = label_ids.squeeze().cpu().numpy().tolist()[1:-1]
        # print (len(tags))
        tag_=""
        label_=""
        token_ = ""
        span_=""
        tokens = tokenizer.convert_ids_to_tokens(input_ids.squeeze().cpu().numpy().tolist())
        # Zip predicted tags, gold labels, tokens and character spans and
        # accumulate one space-separated line per output file.
        for tag,label,token,tokenspan in zip(tags,label_ids,tokenizer.convert_ids_to_tokens(input_ids.squeeze().cpu().numpy().tolist()[1:-1]),batch[-1][0]):
        # for tag,token,tokenspan in zip(tags,tokenizer.convert_ids_to_tokens(input_ids.squeeze().cpu().numpy().tolist()),batch[-1][0]):
            # label = tag
            # print(label,end=' ')
            # token_ = token_+"\t"+str(token)+"\n"
            # tag_ = tag_+" "+str(token)
            token_ = token_+" "+token
            tag_ = tag_+" "+str(tag)
            label_ = label_+" "+str(label)
            span_ = span_+" "+str(tokenspan)

        # fp.write(token_.strip()+"\n")
        fp.write(tag_.strip()+"\n")
        ft.write(token_.strip()+"\n")
        fg.write(label_.strip()+"\n")
        f_span.write(span_.strip()+"\n")
# Run prediction, then terminate: sys.exit() means everything below this
# point (the training loop) is dead code in the script's current state.
predict_ner()
import sys
sys.exit()

# ---------------------------------------------------------------------------
# Training loop.
# NOTE(review): currently unreachable — sys.exit() above stops the script
# first. Also, CUDA_VISIBLE_DEVICES is "0" at this point, so
# DataParallel(device_ids=[0,1,2]) would fail if re-enabled as-is; confirm
# the device configuration before removing the exit.
# ---------------------------------------------------------------------------
import time  # hoisted: the original re-imported time inside the step loop

model = modelForNer(bert_model)
# Resume from the epoch-20 checkpoint; the epoch counter starts at 21 below.
model.load_state_dict(torch.load("parameter-base-twi-news-wiki/parameter-20.pkl"))
model.to(0)
model = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
model.train()
optimizer = AdamW(model.parameters(), lr=args.learning_rate, eps=args.adam_epsilon)

dataset = loadCorpus("corpus/train/")
dataloader = DataLoader(dataset, batch_size=192, collate_fn=collate_fn, shuffle=True)

num_epoch = 3000
l = len(dataloader)  # batches per epoch, for progress logging
for epoch in range(21, num_epoch):
    for step, batch in enumerate(dataloader):
        # batch[-1] (tokenspans) is a plain list, not a tensor — skip it.
        input_ids, token_type_ids, attention_mask, label_ids, tokens_starts = [
            x.to(device) for x in batch[:-1]
        ]
        # Supplying label_ids makes the model return the loss.
        loss = model(input_ids, attention_mask, token_type_ids, label_ids, tokens_starts)
        if step % 10 == 0:
            localtime = time.asctime(time.localtime(time.time()))
            print("epoch" + str(epoch) + " step:" + str(step) + " / " + str(l) + " loss:" + str(loss.mean().item()) + "   time:" + localtime)
        optimizer.zero_grad()
        # .mean() reduces the per-GPU losses gathered by DataParallel.
        loss.mean().backward()
        optimizer.step()
    # Checkpoint after every epoch (the original's `epoch % 1 == 0` guard
    # was always true and has been removed).
    torch.save(model.module.state_dict(),
               'parameter-base-twi-news-wiki/parameter-' + str(epoch) + '.pkl')



