import json
import pdfplumber
import jieba
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import normalize

from transformers import AutoTokenizer, AutoModel
import torch
import numpy as np
from sklearn.preprocessing import normalize
import jieba
import jieba.posseg as pseg

# Load the question set and the PDF corpus.
# Fixes vs. original: both file handles were leaked (bare open() without
# close, pdfplumber handle never closed), json was read without an explicit
# encoding, and extract_text() can return None for pages with no text layer
# (e.g. scanned images), which would crash jieba.lcut downstream.
with open("questions.json", encoding="utf-8") as f:
    questions = json.load(f)

pdf_content = []
with pdfplumber.open("初赛训练数据集.pdf") as pdf:
    for page_idx in range(len(pdf.pages)):
        pdf_content.append({
            'page': 'page_' + str(page_idx + 1),
            # Coerce None -> '' so later tokenization never sees None.
            'content': pdf.pages[page_idx].extract_text() or ''
        })

# Segment questions and page texts with jieba; tokens are re-joined with
# spaces so TfidfVectorizer's default whitespace tokenizer can split them.
question_words = []
for item in questions:
    question_words.append(' '.join(jieba.lcut(item['question'])))

pdf_content_words = [' '.join(jieba.lcut(page['content'])) for page in pdf_content]

# Fit TF-IDF over the combined question + page vocabulary, then report the
# top-3 rarest (highest-IDF) tokens of each question as its keywords.
tfidf = TfidfVectorizer()
tfidf.fit(question_words + pdf_content_words)

print("TFIDF关键词 ---> ")
for question in question_words[:10]:
    print(question)

    vocab = tfidf.vocabulary_
    # Keep only tokens the vectorizer actually indexed.
    in_vocab = np.array([tok for tok in question.split() if tok in vocab])
    idf_scores = np.array([tfidf.idf_[vocab[tok]] for tok in in_vocab])
    # Sort descending by IDF: rarer across the corpus => more informative.
    top3words = in_vocab[idf_scores.argsort()[::-1]][:3]
    print('Top3 关键词', top3words)
    print("")



# Load the bge-small-zh embedding model (tokenizer + encoder) from a local
# checkpoint directory.
model_dir = "../hugging-face-model/BAAI/bge-small-zh-v1.5/"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModel.from_pretrained(model_dir)

# KeyBERT-style keyword extraction: embed the question once, then score each
# jieba-confirmed n-gram span by cosine similarity between its span embedding
# and the whole-sentence embedding; report the top-3 spans.
print("KeyBERT关键词 ---> ")
for question in questions[:10]:
    text = question['question']
    print(text)

    # One forward pass per question; token_feats[i] is the contextual
    # embedding of the i-th tokenizer token (special tokens included).
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    token_feats = outputs.last_hidden_state[0].numpy()
    # (the original also extracted pooler_output, but never used it)

    # chars stays aligned with token_feats because both include the
    # special tokens ([CLS]/[SEP]).
    chars = tokenizer.tokenize(text, add_special_tokens=True)
    words = jieba.lcut(text)

    candidate_words = []
    candidate_scores = []

    # Sentence-level feature: mean over all token embeddings, L2-normalized.
    total_feat = normalize(token_feats.mean(0).reshape(1, -1)).reshape(-1)
    for start_idx, ch in enumerate(chars):
        if start_idx == 0:
            continue  # skip [CLS]

        # Try 2..4-token spans; keep only spans that jieba also segmented
        # as a word. NOTE(review): assumes one tokenizer token per Chinese
        # character (no '##' wordpieces inside a span) — holds for
        # bge-small-zh on CJK text, confirm before reusing elsewhere.
        for n in range(2, 5):
            word = ''.join(chars[start_idx: start_idx + n])
            if word in words:
                candidate_words.append(word)

                # BUG FIX: score the word with the mean embedding of the
                # full n-token span. The original sliced
                # token_output[start_idx: start_idx+1], i.e. only the first
                # character's embedding regardless of the span length n.
                word_feat = normalize(
                    token_feats[start_idx: start_idx + n].mean(0).reshape(1, -1)
                )
                # Dot product of unit vectors == cosine similarity.
                candidate_scores.append(np.dot(word_feat, total_feat)[0])

    candidate_words = np.array(candidate_words)
    candidate_scores = np.array(candidate_scores)

    # Rank candidates by similarity to the sentence embedding, descending.
    top3words = candidate_words[candidate_scores.argsort()[::-1]][:3]
    print('Top3 关键词', top3words)
    print("")