import re
import jieba
from rouge_chinese import Rouge

# Silence jieba's startup/loading messages by raising its log level to INFO.
jieba.setLogLevel(jieba.logging.INFO)


# Extract numbered sentences such as "1. some text" from free text.
# Returns a list of strings, each keeping its leading "N." marker.
def extract_sentences(text):
    """Return every "N. sentence" span found in *text*, marker included.

    Group 1 captures the numeric marker (e.g. "5.") plus any trailing
    whitespace; group 3 captures the sentence body — CJK characters,
    latin letters, whitespace, and common Chinese/English punctuation.
    Without the punctuation characters in the class, a match would stop
    halfway through a sentence.
    """
    pattern = r'((\d+\.)[\s]*)([\u4e00-\u9fa5A-Za-z\s\'？?,.。、——%……^，!！:：“”"]+)'
    found = re.findall(pattern, text)
    # Re-assemble "marker + body" for every match (middle group unused).
    return [prefix + body for prefix, _num, body in found]


# Strip the leading "N." numbering from each extracted sentence,
# keeping only the sentence text.
def get_stripped_sentences(sentence_list):
    """Remove the "N." prefix from every entry of *sentence_list*.

    Splits on the FIRST dot only (``maxsplit=1``), which preserves any
    '.' characters inside the sentence body — the extraction regex in
    this file explicitly allows them. The previous ``split('.')[1]``
    silently truncated such sentences at the second dot.
    """
    stripped = []
    for s in sentence_list:
        # [1] is everything after the numbering; strip leftover whitespace.
        stripped.append(s.split('.', 1)[1].strip())
    return stripped


# Search for *w* as a whole word inside *s* (case-insensitive).
# Typical use — filtering instructions that mention forbidden tokens:
#   if any(find_word_in_string(word, inst) for word in ["image", "draw", "plot"]):
#       continue
def find_word_in_string(w, s):
    """Return the ``re.Match`` for *w* as a whole word in *s*, else None.

    ``re.escape`` keeps regex metacharacters in *w* from being treated
    as pattern syntax (the previous code would crash or mis-match on
    inputs such as "c++" or "a.b").

    NOTE(review): ``\\b`` relies on \\w/non-\\w transitions, and CJK
    characters count as \\w — so there is no boundary between adjacent
    Chinese characters. E.g. "图" is found when it stands alone between
    punctuation, but "图片" is NOT found inside "一张图片" because the
    preceding character is also a word character.
    """
    return re.compile(r'\b({0})\b'.format(re.escape(w)), flags=re.IGNORECASE).search(s)


# Segment an instruction with jieba and join the tokens with single
# spaces — the whitespace-separated form that ROUGE scoring expects.
def tokenized_sentence(sentence):
    tokens = jieba.cut(sentence)
    return ' '.join(tokens)


# Compute the ROUGE-L F1 similarity between two instructions. This
# function itself is sequential; callers may fan it out over a pool.
def calculate_rouge_scores(other_instructions, inst):
    """Return the ROUGE-L F1 score between *other_instructions* and *inst*."""
    scorer = Rouge()
    # Both texts must be whitespace-tokenized before scoring.
    hypothesis = tokenized_sentence(other_instructions)
    reference = tokenized_sentence(inst)
    scores = scorer.get_scores(hypothesis, reference)
    # get_scores returns a one-element list; keep only ROUGE-L's F1.
    return scores[0]['rouge-l']['f']


if __name__ == '__main__':
    # Demo: report the first banned token that appears as a whole word
    # in the instruction, then stop scanning.
    inst = "给我画一张图片，图，表，文件"
    banned_tokens = ["图片", "图像", "图表", "表格", "图", "表", "文件", "文档"]
    for token in banned_tokens:
        if find_word_in_string(token, inst):
            print("非法token：", token)
            break