#encoding=utf-8
from __future__ import unicode_literals
import sys
import logging
import signal
sys.path.append("../")

# import jieba
# import jieba.posseg
# import jieba.analyse
import hanlp
import re
import os
from openai import OpenAI
import json
import semchunk
# import tiktoken                        
from transformers import AutoTokenizer 
import subprocess
import math
import configparser
from llama_cpp import Llama





# Directory containing this source file; base for all config, dictionary and log paths below.
LOCAL_SELF_DIR = os.path.split(os.path.realpath(__file__))[0]


########################## Configuration file handling ###################################
# Load the INI configuration; a 'config.config_ini' override takes
# precedence over the default 'config.ini'.
CONFIG_INI = os.path.join(LOCAL_SELF_DIR, 'config.ini')
CONFIG_CONFIG_INI = os.path.join(LOCAL_SELF_DIR, 'config.config_ini')
CONF = configparser.ConfigParser()
if os.path.exists(CONFIG_CONFIG_INI):    
    CONF.read(CONFIG_CONFIG_INI, encoding='utf-8')   
elif os.path.exists(CONFIG_INI):
    CONF.read(CONFIG_INI, encoding='utf-8')
else:
    # NOTE(review): only prints a warning; the CONF['main'] lookups below will
    # then raise KeyError — consider raising here instead.
    print("配置文件错误")

# Input path-list file: 'path.txt' takes precedence over 'config_path.txt'.
CONFIG_PATH = os.path.join(LOCAL_SELF_DIR, 'config_path.txt')
CONFIG_CONFIG_PATH = os.path.join(LOCAL_SELF_DIR, 'path.txt')   
if os.path.exists(CONFIG_CONFIG_PATH):    
    FILE_PATH_GET = CONFIG_CONFIG_PATH
elif os.path.exists(CONFIG_PATH):
    FILE_PATH_GET = CONFIG_PATH
else:
    # NOTE(review): FILE_PATH_GET is left undefined on this branch.
    print("配置文件错误")

# Work name plus the two model locations read from the [main] section.
THIS_WORK_NAME = CONF['main']['work_name']
SEMCHUNK_NEED_MODEL = CONF['main']['semchunk_model_path'] # local model path, huggingface model (used by semchunk)
LOCAL_NEED_MODEL = CONF['main']['local_model_path'] # local model path, gguf model; only used for local-model judging

LLM = Llama(model_path=LOCAL_NEED_MODEL, chat_format="chatml", n_ctx=40960, n_gpu_layers=20, main_gpu=0, )
# self.LLM = Llama(model_path=LOCAL_NEED_MODEL, chat_format="chatml", n_ctx=40960, seed=1337, ) # variant with a reproducible random seed
# n_threads: CPU-side knob — maximum number of threads to use.
# n_gpu_layers = -1 means all layers. Key GPU-deployment knob: how many model layers run on the GPU; if you hit "out of memory", reduce n_gpu_layers.

# Inverse document frequency (IDF) corpora and stop-words corpus paths,
# derived from the work name.
IDF_PATH = f"{LOCAL_SELF_DIR}/idf_{THIS_WORK_NAME}.txt"
IDF_PATH_GRADE_1 = f"{LOCAL_SELF_DIR}/idf_{THIS_WORK_NAME}_grade_1.txt"
IDF_PATH_1 = f"{LOCAL_SELF_DIR}/idf_{THIS_WORK_NAME}_1.txt"

STOP_WORDS_PATH = f"{LOCAL_SELF_DIR}/stop_words.txt"
LOG_LOCAL_THIS_WORK = LOCAL_SELF_DIR + f"/{THIS_WORK_NAME}.log"

# Online-LLM API credentials and the per-call timeout (seconds) used by the
# SIGALRM guard around local LLM calls.
API_KEY=CONF['main']['api_key']
BASE_URL = CONF['main']['api_url']
TIME_OUT_LLM = int(CONF['main']['time_out'])
########################## Configuration file handling ###################################

##########################日志处理#######################################

# Module-wide logger: DEBUG and above to the console, INFO and above to the
# per-work log file.
LOGGER = logging.getLogger('key_werds_get_stream')
LOGGER.setLevel(level=logging.DEBUG)

FORMATTER = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')

# File handler: persists INFO+ records to the per-work log file.
FILE_HANDLER = logging.FileHandler(LOG_LOCAL_THIS_WORK)
FILE_HANDLER.setLevel(level=logging.INFO)
FILE_HANDLER.setFormatter(FORMATTER)

# Stream handler: echoes DEBUG+ records to the console.
STREAM_HANDLER = logging.StreamHandler()
STREAM_HANDLER.setLevel(logging.DEBUG)
STREAM_HANDLER.setFormatter(FORMATTER)

LOGGER.addHandler(FILE_HANDLER)
LOGGER.addHandler(STREAM_HANDLER)
##########################日志处理#######################################

##########################超时处理#######################################

def handler(signum, frame):
    """SIGALRM handler: abort an overlong LLM call by raising TimeoutError."""
    raise TimeoutError("####################！！处理超时！！####################")
 
##########################超时处理#######################################


##########################设置jieba词典#######################################
# try:
#     jieba.analyse.set_idf_path(idf_path)  # 逆向文件频率（IDF）文本语料库路径
#     jieba.analyse.set_stop_words(stop_words_path)  # 停止词（Stop Words）文本语料库路径
#     logger.info("jieba词典设置成功")
# except Exception as e:
#     logger.critical('#####################jieba词典设置遇到错误##########################')
#     logger.critical(f"报错信息: \n{e}")
#     logger.critical('#####################jieba词典设置遇到错误##########################')
##########################设置jieba词典#######################################


########################## hanlp dictionary setup ###################################
# Load the three hanlp pipelines: coarse tokenization, POS tagging and NER.
tok = hanlp.load(hanlp.pretrained.tok.COARSE_ELECTRA_SMALL_ZH)
pos = hanlp.load(hanlp.pretrained.pos.CTB9_POS_ELECTRA_SMALL)
ner = hanlp.load(hanlp.pretrained.ner.MSRA_NER_ELECTRA_SMALL_ZH)

tok.dict_combine = {}             # merge-mode tokenization dictionary (starts empty)
ner.dict_whitelist = {}     # named-entity whitelist dictionary
ner.dict_blacklist = {}               # named-entity blacklist dictionary
# On-disk persistence paths for the three dictionaries above.
DICT_COMBINE_PATH = f"{LOCAL_SELF_DIR}/dict_combine_{THIS_WORK_NAME}.json"
DICT_WHITELIST_PATH = f"{LOCAL_SELF_DIR}/dict_whitelist_{THIS_WORK_NAME}.json"
DICT_BLACKLIST_PATH = f"{LOCAL_SELF_DIR}/dict_blacklist_{THIS_WORK_NAME}.json"
########################## hanlp dictionary setup ###################################

class TextTools:
    """Utility helpers: semantic chunking, de-duplicated file persistence,
    word deletion and text cleanup."""

    # Keep only ASCII letters/digits, CJK ideographs and whitespace.
    # Raw string so "\s" is not an invalid string escape; the re module
    # itself resolves the \uXXXX range inside the character class.
    _CLEAR_PATTERN = re.compile(r"[^a-zA-Z0-9\u4e00-\u9fa5\s]")

    def __init__(self):
        # Cache of semchunk chunkers keyed by chunk_size so repeated calls do
        # not rebuild the tokenizer-backed chunker every time.
        self._chunkers = {}

    def text_to_chunk(self, chunk_size: int, text: str, overlap=0.05, processes=1):
        """Split *text* into chunks of at most *chunk_size* tokens via semchunk.

        *overlap* is the fractional overlap between consecutive chunks.
        Returns whatever the semchunk chunker returns (a list of chunks).
        """
        chunker = self._chunkers.get(chunk_size)
        if chunker is None:
            chunker = semchunk.chunkerify(SEMCHUNK_NEED_MODEL, chunk_size)
            self._chunkers[chunk_size] = chunker
        return chunker(text, overlap=overlap, processes=processes)

    # Write a list (or other iterable) of strings to a file without duplicates.
    def write_unique_data(self, filename, new_data):
        """Append items of *new_data* to *filename*, skipping lines already present.

        Existing lines are read first (a missing file is treated as empty);
        only unseen items are appended, one per line.

        Example:
            data_to_write = ["apple", "banana", "orange", "apple"]
            write_unique_data('fruits.txt', data_to_write)
        """
        unique_data = set()
        # Read the existing file content into the de-duplication set.
        try:
            with open(filename, 'r', encoding='utf-8') as file:
                for line in file:
                    unique_data.add(line.strip())  # drop the trailing newline
        except FileNotFoundError:
            pass  # file does not exist yet: start from an empty set
        # Append only data not already present.
        with open(filename, 'a', encoding='utf-8') as file:
            for item in new_data:
                if item not in unique_data:
                    file.write(item + '\n')
                    unique_data.add(item)

    # Write a dict to a file, keyed on the first column, without duplicates.
    def write_unique_dict_data(self, filename, new_data: dict):
        """Append *new_data* entries to *filename* as "key<TAB>value..." lines.

        Keys already present in the file's first column are skipped.
        List/tuple values are tab-joined; scalar values (including plain
        strings) are written as-is — previously a string value was exploded
        into tab-separated characters.
        """
        unique_data = set()
        # Collect the first column of every existing line.
        try:
            with open(filename, 'r', encoding='utf-8') as file:
                for line in file:
                    split_line = line.split()
                    if split_line:
                        unique_data.add(split_line[0])
        except FileNotFoundError:
            pass  # file does not exist yet: start from an empty set
        with open(filename, 'a', encoding='utf-8') as file:
            # JSON round-trip normalizes keys to str and values to JSON types.
            res = json.loads(json.dumps(new_data))
            for k, v in res.items():
                if k not in unique_data:
                    LOGGER.critical(f"k和v分别为: \n{k} \n{v}")
                    if isinstance(v, (list, tuple)):
                        my_string = "\t".join(str(i_str) for i_str in v)
                    else:
                        my_string = v
                    file.write(str(k) + "\t" + str(my_string) + '\n')
                    unique_data.add(k)

    # Delete an idle word from a dictionary file.
    def delete_idle_stop_words(self, filename, banana_words):
        """Remove every line exactly equal to *banana_words* from *filename*.

        Pure-Python replacement for the previous ``sed -i`` call: portable,
        and immune to regex metacharacters in the word being deleted.
        A missing file is a no-op.
        """
        try:
            with open(filename, 'r', encoding='utf-8') as file:
                lines = file.readlines()
        except FileNotFoundError:
            return
        kept = [line for line in lines if line.rstrip('\n') != banana_words]
        if len(kept) != len(lines):
            with open(filename, 'w', encoding='utf-8') as file:
                file.writelines(kept)

    # Read a whole file as a single string.
    def open_file(self, file):
        """Return the entire contents of *file* as one string (UTF-8)."""
        with open(file, 'r', encoding='utf-8') as f:
            return f.read()

    # Read a file as a list of lines.
    def read_file_list(self, file):
        """Return the lines of *file* as a list (newlines preserved, UTF-8)."""
        with open(file, 'r', encoding='utf-8') as f:
            return f.readlines()

    # Strip all special characters other than digits, letters and CJK text.
    def clear_context(self, file_context):
        """Remove every character that is not a letter, digit, CJK ideograph or whitespace."""
        return self._CLEAR_PATTERN.sub('', file_context)
      

class Hanlp2Keyword:
    """Thin wrappers around the module-level hanlp pipelines (tok / pos / ner).

    Each wrapper logs and returns None on failure; previously the result
    variable was left unbound when the pipeline raised, so the ``return``
    itself raised UnboundLocalError and masked the real error.
    """

    def hanlp_tok(self, file_context):
        """Tokenize *file_context* with the coarse hanlp tokenizer; None on failure."""
        tok_list = None
        try:
            tok_list = tok(file_context)
        except Exception as e:
            LOGGER.critical(f"hanlp_tok报错信息: \n{e}")
        return tok_list

    def hanlp_pos(self, tok_list):
        """POS-tag an existing token list; None on failure."""
        pos_list = None
        try:
            pos_list = pos(tok_list)
        except Exception as e:
            LOGGER.critical(f"hanlp_pos报错信息: \n{e}")
        return pos_list

    def hanlp_ner(self, tok_list):
        """Run named-entity recognition on a token list; None on failure."""
        ner_list = None
        try:
            ner_list = ner(tok_list, tasks='ner*')
        except Exception as e:
            LOGGER.critical(f"hanlp_ner报错信息: \n{e}")
        return ner_list


class Jieba2Keyword:
    """Keyword extraction via jieba's TF-IDF and TextRank algorithms.

    NOTE(review): the ``import jieba`` lines at the top of this file are
    commented out, so both methods currently raise NameError when called —
    restore the jieba imports before using this class.
    """

    def __init__(self):
        pass
    # TF-IDF keyword extraction: takes a string, returns an iterable of (keyword, weight) pairs.
    def tfide_jieba_str_to_keyword(self,file_context,keyword_number):
        """Extract the top *keyword_number* keywords from *file_context* by TF-IDF.

        Example:
            for x, w in result:
                print('%s %s' % (x, w))
        where x is the keyword and w its weight.

        Args:
            file_context: str, the text to analyse.
            keyword_number: int, how many keywords to return.

        Returns:
            Iterable of (keyword, weight) pairs.
        """
        return jieba.analyse.extract_tags(file_context, withWeight=True, topK=keyword_number)

    # TextRank keyword extraction: takes a string, returns an iterable of (keyword, weight) pairs.
    def textrank_jieba_str_to_keyword(self,file_context,keyword_number):
        """Extract the top *keyword_number* keywords from *file_context* by TextRank.

        Example:
            for x, w in result:
                print('%s %s' % (x, w))
        where x is the keyword and w its weight.

        Args:
            file_context: str, the text to analyse.
            keyword_number: int, how many keywords to return.

        Returns:
            Iterable of (keyword, weight) pairs.
        """
        return jieba.analyse.textrank(file_context, withWeight=True, topK=keyword_number)        
    

class Examination2Datasets:
    """Convert exam-question style text into a test dataset.

    Only a skeleton so far: the per-question-type converters below are
    placeholders and not yet implemented.

    Returns:
        _type_: _JsonObject_
    """

    # Pattern recognition: detect the question type (not yet implemented).
    def examination_type(self,):
        pass

    # Single-choice questions (TODO)

    # Multiple-choice questions (TODO)

    # Fill-in-the-blank questions (TODO)

    # True/false questions (TODO)

    # Short-answer questions (TODO)
    






class Keywords2Question:
    """Use the local model to derive questions and answers from keywords and chunks.

    Each method arms a SIGALRM timeout of TIME_OUT_LLM seconds around the
    blocking LLM call, cancels it in ``finally`` (previously the alarm was
    left armed and could fire long after the call returned), and returns the
    model's JSON string or None on error.

    Returns:
        _type_: _JsonObject_
    """

    # Judge, against a text chunk, whether each given keyword is fully defined by the chunk.
    def local_llm_keywords_importance(self, chunk, keywords):
        """Classify *keywords* against *chunk* as fully defined / partially associated / undefined.

        Returns the model's JSON string with keys "fully_defined",
        "partial_association" and "undefined", or None on error/timeout.
        """
        # Predefined example responses (for few-shot prompting)
        example1_response = json.dumps(
            {
                "keywords": ["稀土材料", "稀土元素", "光学性能", "磁学性能", "金属元素", "科技竞争", "产业链", "战略地位", "磁性材料", "催化剂", "光学材料",],
            },
            ensure_ascii=False
        )
        example2_response = json.dumps(
            {
                "keywords": ["稀土永磁材料", "钕铁硼磁体", "电动汽车", "风力发电机", "稀土元素", "储氢合金", "固体氧化物燃料电池", "新兴环保领域",],
            },
            ensure_ascii=False
        )
        example3_response = json.dumps(
            {
                "keywords": ["钕磁铁", "Neodymium magnet", "钕铁硼磁铁", "NdFeB magnet", "Nd₂Fe₁₄B", "钕", "Neodymium", "Nd", "铁", "Iron", "Fe", "硼", "Boron", "B", "四方晶系晶体", "磁能积", "BHmax", "钐钴磁铁",],
            },
            ensure_ascii=False
        )
        example11_response = json.dumps(
            {
                "fully_defined": ["稀土材料",], 
                "partial_association": ["金属元素",],
                "undefined": ["稀土元素", "光学性能", "磁学性能", "科技竞争", "产业链", "战略地位", "磁性材料", "催化剂", "光学材料",],
            },
            ensure_ascii=False
        )
        example22_response = json.dumps(
            {
                "fully_defined": [], 
                "partial_association": [],
                "undefined": ["稀土永磁材料", "钕铁硼磁体", "电动汽车", "风力发电机", "稀土元素", "储氢合金", "固体氧化物燃料电池", "新兴环保领域",],
            },
            ensure_ascii=False
        )
        example33_response = json.dumps(
            {
                "fully_defined": ["钕磁铁", "Neodymium magnet", "钕铁硼磁铁", "NdFeB magnet", "Nd₂Fe₁₄B",], 
                "partial_association": ["钕", "Neodymium", "Nd", "铁", "Iron", "Fe", "硼", "Boron", "B",],
                "undefined": ["四方晶系晶体", "磁能积", "BHmax", "钐钴磁铁",],
            },
            ensure_ascii=False
        )
        try:
            # Arm the SIGALRM-based timeout around the blocking LLM call.
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(TIME_OUT_LLM)
            response = LLM.create_chat_completion(
                        messages=[
                            {
                                "role": "system",
                                "content": f"""根据文本块判断，给定关键词相对于文本块是否能够被完全定义或解释。注意，“标注完整名称和符号”不能被归类为完全定义。输出为包含"fully_defined"、"partial_association"和"undefined"的object。
                                示例：
                                {example1_response}
                                TEXT："稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。"
                                A：{example11_response}
                                
                                {example2_response}
                                TEXT："中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。"
                                A：{example22_response}
                                
                                {example3_response}
                                TEXT："钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。"
                                A：{example33_response}""",
                            },
                            {"role": "user", "content": f"""{keywords}
                                                            {chunk}
                                                            """},
                        ],
                        response_format={
                            "type": "json_object",
                            "schema": {
                                "type": "object",
                                "properties": {"fully_defined": {"type": "array"}, "partial_association": {"type": "array"}, "undefined": {"type": "array"}},
                                "required": ["fully_defined", "partial_association", "undefined"],
                            },
                        },
                        temperature=0.6,
                        top_k=20, 
                        top_p=0.95, 
                        min_p=0, 
                    )
            output_content = response["choices"][0]["message"]["content"]
        except Exception as e:
            output_content = None
            LOGGER.critical(f"根据文本块判断，给定关键词相对于文本块是否能够被完全定义时遇到报错。报错信息: \n{e}")
        finally:
            # Cancel the pending alarm so it cannot fire after this call returns.
            signal.alarm(0)
        return output_content

    # Ask a question around the given keywords and answer it from the text chunk.
    def local_llm_key_question(self, chunk, keywords):
        """Generate question/answer output for *keywords* grounded in *chunk*.

        Returns the model's JSON string, or None on error/timeout.
        Bug fix: the user message previously referenced an undefined
        ``chunk_context`` name (NameError); it now sends the method's actual
        *keywords* and *chunk* arguments.
        """
        # Predefined example responses (for few-shot prompting)
        example1_response = json.dumps(
            {
                "academic": ["稀土材料", "稀土元素", "光学性能", "磁学性能", "金属元素", "科技竞争", "产业链", "战略地位", "磁性材料", "催化剂", "光学材料",] 
            },
            ensure_ascii=False
        )
        example2_response = json.dumps(
            {
                "academic": ["稀土永磁材料", "钕铁硼磁体", "电动汽车", "风力发电机", "稀土元素", "储氢合金", "固体氧化物燃料电池", "新兴环保领域",] 
            },
            ensure_ascii=False
        )
        example3_response = json.dumps(
            {
                "academic": ["钕磁铁", "Neodymium magnet", "钕铁硼磁铁", "NdFeB magnet", "Nd₂Fe₁₄B", "钕", "Neodymium", "Nd", "铁", "Iron", "Fe", "硼", "Boron", "B", "四方晶系晶体", "磁能积", "BHmax", "钐钴磁铁",]
            },
            ensure_ascii=False
        )
        try:
            # Arm the SIGALRM-based timeout around the blocking LLM call.
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(TIME_OUT_LLM)
            # NOTE(review): the prompt asks for question/answer output but the
            # schema below requires an "academic" array — confirm the intended
            # output shape before relying on this method.
            response = LLM.create_chat_completion(
                        messages=[
                            {
                                "role": "system",
                                "content": f"""以给定的关键词关键词为中心，严格参照文本块内容给出合理的问题（question）和回答（answer）到object中的question和answer。其中，回答（answer）的内容被严格限制在给定的文本块的截取和小幅修改内。
                                                示例：
                                                KEY：稀土材料
                                                TEXT：稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。                                            
                                                A：{example1_response}
                                                
                                                Q：中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。
                                                A：{example2_response}
                                                
                                                Q：钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。
                                                A：{example3_response}""",
                            },
                            {"role": "user", "content": f"""{keywords}
                                                            {chunk}
                                                            """},
                        ],
                        response_format={
                            "type": "json_object",
                            "schema": {
                                "type": "object",
                                "properties": {"academic": {"type": "array"}},
                                "required": ["academic"],
                            },
                        },
                        temperature=0.6,
                        top_k=20, 
                        top_p=0.95, 
                        min_p=0, 
                    )
            output_content = response["choices"][0]["message"]["content"]
        except Exception as e:
            output_content = None
            LOGGER.critical(f"根据给定关键词和文本块提出问题，并依据文本块回答问题时遇到报错。报错信息: \n{e}") 
        finally:
            # Cancel the pending alarm so it cannot fire after this call returns.
            signal.alarm(0)
        return output_content





class LocalLlmUse:
    """Local-LLM helpers: extract academic vocabulary, grade it, and classify
    tokenized words as academic / idle / stop words.

    Each method arms a SIGALRM timeout of TIME_OUT_LLM seconds around the
    blocking LLM call and cancels it in ``finally`` (previously the alarm was
    left armed and could fire long after the call returned). On any error the
    method logs and returns None; otherwise it returns the model's JSON string.
    """

    # Generate the academic-vocabulary list with the local LLM.
    def local_llm_first_to_dist(self, chunk_context):
        """Extract academic/technical terms from *chunk_context*.

        Returns the model's JSON string ({"academic": [...]}), or None on
        error or timeout.
        """
        # Predefined example responses (for few-shot prompting)
        example1_response = json.dumps(
            {
                "academic": ["稀土材料", "稀土元素", "光学性能", "磁学性能", "金属元素", "科技竞争", "产业链", "战略地位", "磁性材料", "催化剂", "光学材料",] 
            },
            ensure_ascii=False
        )
        example2_response = json.dumps(
            {
                "academic": ["稀土永磁材料", "钕铁硼磁体", "电动汽车", "风力发电机", "稀土元素", "储氢合金", "固体氧化物燃料电池", "新兴环保领域",] 
            },
            ensure_ascii=False
        )
        example3_response = json.dumps(
            {
                "academic": ["钕磁铁", "Neodymium magnet", "钕铁硼磁铁", "NdFeB magnet", "Nd₂Fe₁₄B", "钕", "Neodymium", "Nd", "铁", "Iron", "Fe", "硼", "Boron", "B", "四方晶系晶体", "磁能积", "BHmax", "钐钴磁铁",] 
            },
            ensure_ascii=False
        )
        try:
            # Arm the SIGALRM-based timeout around the blocking LLM call.
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(TIME_OUT_LLM)
            response = LLM.create_chat_completion(
                        messages=[
                            {
                                "role": "system",
                                "content": f"""提取文本块中的学术词汇或专业词汇到academic（键）的值中，输出包含academic（键）和数组（值）的object。提取过程中应极为注意，避免选取数字、量词或带有单位的数字作为学术词汇。
                                                示例：
                                                Q：稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。
                                                A：{example1_response}
                                                
                                                Q：中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。
                                                A：{example2_response}
                                                
                                                Q：钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。
                                                A：{example3_response}""",
                            },
                            {"role": "user", "content": f"{chunk_context}"},
                        ],
                        response_format={
                            "type": "json_object",
                            "schema": {
                                "type": "object",
                                "properties": {"academic": {"type": "array"}},
                                "required": ["academic"],
                            },
                        },
                        temperature=0.6,
                        top_k=20, 
                        top_p=0.95, 
                        min_p=0, 
                    )
            output_content = response["choices"][0]["message"]["content"]
        except Exception as e:
            output_content = None
            LOGGER.critical(f"本地大模型生成学术词汇列表时遇到报错。报错信息: \n{e}")
        finally:
            # Cancel the pending alarm so it cannot fire after this call returns.
            signal.alarm(0)
        return output_content

    # Grade the academic vocabulary in the dictionary with the local LLM.
    def local_llm_grade_dist(self, word_context):
        """Grade a term on a 0-5 scale of specialization.

        Levels:
            0  ungraded / grading failed
            1  beginner
            2  intermediate
            3  advanced
            4  academic expert
            5  specialized sub-field

        Grading criterion: the probability and frequency with which
        practitioners at each level encounter the term — from words laypeople
        and junior engineers meet often (1) up to words even experts only meet
        inside a concrete research project (5).

        Returns the model's JSON string ({word: grade}), or None on
        error or timeout.
        """
        try:
            # Arm the SIGALRM-based timeout around the blocking LLM call.
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(TIME_OUT_LLM)
            response = LLM.create_chat_completion(
                        messages=[
                            {
                                "role": "system",
                                "content": """用分数将用户给出的词汇分级：
                                                        '0' 为未分级或分级失败标识
                                                        '1' 初级
                                                        '2' 中级
                                                        '3' 高级
                                                        '4' 学术专家
                                                        '5' 专业细分
                                                分级依据为不同类型人员接触该级别的概率和频率：
                                                        初级：外行、初级工程师或刚入门从业者有较大接触概率和较大接触频率的词汇。
                                                        中级：中级工程师或熟练从业者有较大接触概率和较大接触频率的词汇，外行仍有较低接触概率。
                                                        高级：高级工程师或资深从业者有较大接触概率和较大接触频率的词汇，外行低接触概率进一步降低。
                                                        学术专家：学术专家或总工级别的从业人员有较大接触概率和较大接触频率的词汇，外行已无接触可能。
                                                        专业细分：高级工程师、学术专家或总工级别的从业人员只有在研究具体专业问题或实施具体项目的场景中才会有较大接触概率和较大接触频率的词汇，若无此类场景即便是专家和总工也无只有较低接触概率，外行已无接触可能。""",
                            },
                            {"role": "user", "content": f"{word_context}"},
                        ],
                        response_format={
                            "type": "json_object",
                            "schema": {
                                "type": "object",
                                "properties": {f"{word_context}": {"type": "number", "multipleOf" : 1, "minimum": 0, "maximum": 5}},
                                "required": [f"{word_context}"],
                            },
                        },
                        temperature=0.6,
                        top_k=20, 
                        top_p=0.95, 
                        min_p=0, 
                    )
            output_content = response["choices"][0]["message"]["content"]
        except Exception as e:
            output_content = None
            LOGGER.critical(f"本地大模型给词典中的学术词汇分级时遇到报错。报错信息: \n{e}")
        finally:
            # Cancel the pending alarm so it cannot fire after this call returns.
            signal.alarm(0)
        return output_content

    # Classify stop words and academic words at the same time with the local LLM.
    def local_llm_judge_academic_or_stop_words(self, chunk_context, part_word_list):
        """Classify each word of *part_word_list* against *chunk_context*.

        Returns the model's JSON string with keys "academic_words",
        "idle_words" and "stop_words", or None on error or timeout.
        Bug fix: the system prompt previously listed "stop_words" twice where
        it meant "idle_words", contradicting the schema below.
        """
        # Predefined example responses (for few-shot prompting)
        example1_response = json.dumps(
            {
                "part": ["光学材料", "17", "磁学", "磁性材料", "金属元素", "稀土元素", "战略地位", "具有", "稀土", "催化剂", "广泛应用", "一组", "光学", "产业链", "独特", "性能", "竞争", "科技", "材料", "领域", "它们", "包括", "主要", ],
            },
            ensure_ascii=False
        )
        example2_response = json.dumps(
            {
                "part": ["钕铁硼", "90%", "储氢", "永磁", "扮演着", "稀土元素", "磁体", "燃料电池", "电动汽车", "发电机", "稀土", "氧化物", "风力", "合金", "重要", "固体", "角色", "环保", "总量", "新兴", "产量", "材料", "领域", "尤其", "全球", "此外", "左右", "作用", "具有", "中国", ],
            },
            ensure_ascii=False
        )
        example3_response = json.dumps(
            {
                "part": ["磁铁", "磁能积", "Neodymium", "magnet", "Nd", "Fe", "钕铁硼", "NdFeB", "Iron", "Boron", "1982", "佐川", "BHmax", "住友", "化学式", "晶系", "真人", "四方", "晶体", "全世界", "大于", "金属", "特殊", "物质", "称为", "当时", "最大", "发现", "形成", "这种", ],
            },
            ensure_ascii=False
        )
        example11_response = json.dumps(
            {
                "academic_words": ['光学材料',  '磁学',  '磁性材料',  '金属元素',  '稀土元素',  '战略地位',  '稀土',  '催化剂',  '光学',  '产业链',], 
                "idle_words": ['17', '具有', '广泛应用', '一组', '独特', '性能', '竞争', '科技', '材料', '领域', '它们', '包括', '主要'],
                "stop_words": [],
            },
            ensure_ascii=False
        )
        example22_response = json.dumps(
            {
                "academic_words": ['钕铁硼', '储氢', '永磁', '稀土元素', '磁体', '燃料电池', '电动汽车', '发电机', '稀土', '氧化物', '风力', '合金', '环保', '产量', '材料',], 
                "idle_words": ['90%', '重要', '固体', '角色', '总量', '新兴', '领域', '全球', '作用', '具有', '中国',],
                "stop_words": ['扮演着', '尤其', '此外', '左右',],
            },
            ensure_ascii=False
        )
        example33_response = json.dumps(
            {
                "academic_words": ['磁铁', '磁能积', 'Neodymium', 'Nd', 'Fe', '钕铁硼', 'NdFeB', 'Iron', 'Boron', 'BHmax', '化学式', '晶系', '四方', '晶体', '金属',], 
                "idle_words": ['magnet', '1982', '佐川', '住友', '真人', '全世界', '大于', '特殊', '物质', '当时', '最大', '发现', '形成',],
                "stop_words": ['称为', '这种',],
            },
            ensure_ascii=False
        )
        try:
            # Arm the SIGALRM-based timeout around the blocking LLM call.
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(TIME_OUT_LLM)
            response = LLM.create_chat_completion(
                        messages=[
                            {
                                "role": "system",
                                "content": f"""基于TEXT文本内容判断给出的"part"中的分词是否为学术词汇（专业词汇）、闲置词或停用词，输出为包含"academic_words"、"idle_words"和"stop_words"的object。其中学术词汇（专业词汇）添加到"academic_words"，停用词添加到"stop_words",其他既不属于学术词汇（专业词汇）也不属于停用词的分词（闲置词）添加到"idle_words"。
                                示例：
                                TEXT："稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。"
                                Q：{example1_response}
                                A：{example11_response}
                                
                                TEXT："中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。"
                                Q：{example2_response}
                                A：{example22_response}
                                
                                TEXT："钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。"
                                Q：{example3_response}
                                A：{example33_response}""",
                            },
                            {"role": "user", "content": f"""
                                                            {chunk_context}
                                                            {part_word_list}
                                                            """},
                        ],
                        response_format={
                            "type": "json_object",
                            "schema": {
                                "type": "object",
                                "properties": {"academic_words": {"type": "array"}, "idle_words": {"type": "array"}, "stop_words": {"type": "array"}},
                                "required": ["academic_words", "idle_words", "stop_words"],
                            },
                        },
                        temperature=0.6,
                        top_k=20, 
                        top_p=0.95, 
                        min_p=0, 
                    )
            output_content = response["choices"][0]["message"]["content"]
        except Exception as e:
            output_content = None
            LOGGER.critical(f"本地大模型同时判断停用词和学术词汇时遇到报错。报错信息: \n{e}")
        finally:
            # Cancel the pending alarm so it cannot fire after this call returns.
            signal.alarm(0)
        return output_content
        



# Online LLM helpers: generate an academic-vocabulary list, judge academic
# words, and judge stop words through an OpenAI-compatible endpoint.
class LlmUse:
    """Wrapper around an OpenAI-compatible chat endpoint for vocabulary work.

    Every public method sends a few-shot prompt and returns the model's raw
    JSON string; callers are expected to parse it with ``json.loads``.
    """

    def __init__(self, api_key, base_url, model="qwen-plus"):
        """Create the chat client.

        Args:
            api_key: API key (e.g. a Bailian/DashScope key "sk-xxx").
            base_url: base URL of the OpenAI-compatible endpoint.
            model: chat model name used for every request; the default keeps
                the previously hard-coded "qwen-plus".
        """
        self.model = model
        self.client = OpenAI(
            api_key=api_key,
            base_url=base_url,
            )

    def _request_json(self, system_content, user_content):
        """Send one system+user exchange, forcing a JSON-object response.

        Returns the raw JSON string produced by the model.
        """
        completion = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": system_content},
                {"role": "user", "content": user_content},
            ],
            response_format={"type": "json_object"},
        )
        return completion.choices[0].message.content

    @staticmethod
    def _part_examples():
        """Few-shot Q payloads (tokenised "part" lists) shared by all judge prompts.

        Returns a 3-tuple of JSON strings, one per example text.
        """
        part1 = ["光学材料", "17", "磁学", "磁性材料", "金属元素", "稀土元素", "战略地位", "具有", "稀土", "催化剂", "广泛应用", "一组", "光学", "产业链", "独特", "性能", "竞争", "科技", "材料", "领域", "它们", "包括", "主要", ]
        part2 = ["钕铁硼", "90%", "储氢", "永磁", "扮演着", "稀土元素", "磁体", "燃料电池", "电动汽车", "发电机", "稀土", "氧化物", "风力", "合金", "重要", "固体", "角色", "环保", "总量", "新兴", "产量", "材料", "领域", "尤其", "全球", "此外", "左右", "作用", "具有", "中国", ]
        part3 = ["磁铁", "磁能积", "Neodymium", "magnet", "Nd", "Fe", "钕铁硼", "NdFeB", "Iron", "Boron", "1982", "佐川", "BHmax", "住友", "化学式", "晶系", "真人", "四方", "晶体", "全世界", "大于", "金属", "特殊", "物质", "称为", "当时", "最大", "发现", "形成", "这种", ]
        return tuple(json.dumps({"part": p}, ensure_ascii=False) for p in (part1, part2, part3))

    # LLM: generate the initial academic-vocabulary list for a chunk
    def llm_first_to_dist(self, chunk_context):
        """Extract academic/technical terms from *chunk_context*.

        Returns the model's JSON string containing an "academic" dict.
        """
        # Pre-built example responses for few-shot prompting
        example1_response = json.dumps(
            {
                "academic": {"稀土材料": None, "稀土元素": None, "光学性能": None, "磁学性能": None, "金属元素": None, "科技竞争": None, "产业链": None, "战略地位": None, "磁性材料": None, "催化剂": None, "光学材料": None, },
            },
            ensure_ascii=False
        )
        example2_response = json.dumps(
            {
                "academic": {"稀土永磁材料": None, "钕铁硼磁体": None, "电动汽车": None, "风力发电机": None, "稀土元素": None, "储氢合金": None, "固体氧化物燃料电池": None, "新兴环保领域": None, },
            },
            ensure_ascii=False
        )
        example3_response = json.dumps(
            {
                "academic": {"钕磁铁": "Neodymium magnet", "钕铁硼磁铁": "NdFeB magnet", "Nd₂Fe₁₄B": "钕铁硼化学式", "钕": ["Neodymium", "Nd"], "铁": ["Iron", "Fe"], "硼": ["Boron", "B"], "四方晶系晶体": None, "磁能积": "BHmax", "钐钴磁铁": None, },
            },
            ensure_ascii=False
        )

        system_prompt = f"""提取文本块中的学术词汇或专业词汇到academic（字典类型），输出包含academic字典的JSON。
                    示例：
                    Q：稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。
                    A：{example1_response}
                    
                    Q：中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。
                    A：{example2_response}
                    
                    Q：钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。
                    A：{example3_response}"""
        return self._request_json(system_prompt, f"{chunk_context}")

    # LLM: judge which of the given tokens are academic words
    def llm_judge_academic(self, chunk_context, part_word_list):
        """Judge each token in *part_word_list* against *chunk_context*.

        Returns the model's JSON string containing a "judge" dict of
        "yes"/"no" values.
        """
        # Shared few-shot Q payloads plus the per-task A payloads
        example1_response, example2_response, example3_response = self._part_examples()
        example11_response = json.dumps(
            {
                "judge": {"光学材料": "yes", "17": "no", "磁学": "yes", "磁性材料": "yes", "金属元素": "yes", "稀土元素": "yes", "战略地位": "yes", "具有": "no", "稀土": "yes", "催化剂": "yes", "广泛应用": "no", "一组": "no", "光学": "yes", "产业链": "yes", "独特": "no", "性能": "no", "竞争": "no", "科技": "no", "材料": "no", "领域": "no", "它们": "no", "包括": "no", "主要": "no",},
            },
            ensure_ascii=False
        )
        example22_response = json.dumps(
            {
                "judge": {"钕铁硼": "yes", "90%": "no", "储氢": "yes", "永磁": "yes", "扮演着": "no", "稀土元素": "yes", "磁体": "yes", "燃料电池": "yes", "电动汽车": "yes", "发电机": "yes", "稀土": "yes", "氧化物": "yes", "风力": "yes", "合金": "yes", "重要": "no", "固体": "no", "角色": "no", "环保": "yes", "总量": "no", "新兴": "no", "产量": "yes", "材料": "yes", "领域": "no", "尤其": "no", "全球": "no", "此外": "no", "左右": "no", "作用": "no", "具有": "no", "中国": "no",},
            },
            ensure_ascii=False
        )
        example33_response = json.dumps(
            {
                "judge": {"磁铁": "yes", "磁能积": "yes", "Neodymium": "yes", "magnet": "no", "Nd": "yes", "Fe": "yes", "钕铁硼": "yes", "NdFeB": "yes", "Iron": "yes", "Boron": "yes", "1982": "no", "佐川": "no", "BHmax": "yes", "住友": "no", "化学式": "yes", "晶系": "yes", "真人": "no", "四方": "yes", "晶体": "yes", "全世界": "no", "大于": "no", "金属": "yes", "特殊": "no", "物质": "no", "称为": "no", "当时": "no", "最大": "no", "发现": "no", "形成": "no", "这种": "no",} ,
            },
            ensure_ascii=False
        )

        system_prompt = f"""基于TEXT文本内容判断给出的part列表中的分词是否为学术词汇或专业词汇到judge（字典类型），输出包含judge字典的JSON。
                    示例：
                    TEXT："稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。"
                    Q：{example1_response}
                    A：{example11_response}
                    
                    TEXT："中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。"
                    Q：{example2_response}
                    A：{example22_response}
                    
                    TEXT："钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。"
                    Q：{example3_response}
                    A：{example33_response}"""
        user_prompt = f"""
                    {chunk_context}
                    {part_word_list}
                    """
        return self._request_json(system_prompt, user_prompt)

    # LLM: judge which of the given tokens are stop words
    def llm_judge_stop_words(self, chunk_context, part_word_list):
        """Judge each token in *part_word_list* for stop-word status.

        Returns the model's JSON string containing a "judge_stop_words" dict
        of "stop_words"/"not_stop_words" values.
        """
        example1_response, example2_response, example3_response = self._part_examples()
        example11_response = json.dumps(
            {
                "judge_stop_words": {"光学材料": "not_stop_words", "17": "not_stop_words", "磁学": "not_stop_words", "磁性材料": "not_stop_words", "金属元素": "not_stop_words", "稀土元素": "not_stop_words", "战略地位": "not_stop_words", "具有": "not_stop_words", "稀土": "not_stop_words", "催化剂": "not_stop_words", "广泛应用": "not_stop_words", "一组": "not_stop_words", "光学": "not_stop_words", "产业链": "not_stop_words", "独特": "not_stop_words", "性能": "not_stop_words", "竞争": "not_stop_words", "科技": "not_stop_words", "材料": "not_stop_words", "领域": "not_stop_words", "它们": "not_stop_words", "包括": "not_stop_words", "主要": "not_stop_words",},
            },
            ensure_ascii=False
        )
        example22_response = json.dumps(
            {
                "judge_stop_words": {"钕铁硼": "not_stop_words", "90%": "not_stop_words", "储氢": "not_stop_words", "永磁": "not_stop_words", "扮演着": "stop_words", "稀土元素": "not_stop_words", "磁体": "not_stop_words", "燃料电池": "not_stop_words", "电动汽车": "not_stop_words", "发电机": "not_stop_words", "稀土": "not_stop_words", "氧化物": "not_stop_words", "风力": "not_stop_words", "合金": "not_stop_words", "重要": "not_stop_words", "固体": "not_stop_words", "角色": "not_stop_words", "环保": "not_stop_words", "总量": "not_stop_words", "新兴": "not_stop_words", "产量": "not_stop_words", "材料": "not_stop_words", "领域": "not_stop_words", "尤其": "stop_words", "全球": "not_stop_words", "此外": "stop_words", "左右": "stop_words", "作用": "not_stop_words", "具有": "not_stop_words", "中国": "not_stop_words",},
            },
            ensure_ascii=False
        )
        example33_response = json.dumps(
            {
                "judge_stop_words": {"磁铁": "not_stop_words", "磁能积": "not_stop_words", "Neodymium": "not_stop_words", "magnet": "not_stop_words", "Nd": "not_stop_words", "Fe": "not_stop_words", "钕铁硼": "not_stop_words", "NdFeB": "not_stop_words", "Iron": "not_stop_words", "Boron": "not_stop_words", "1982": "not_stop_words", "佐川": "not_stop_words", "BHmax": "not_stop_words", "住友": "not_stop_words", "化学式": "not_stop_words", "晶系": "not_stop_words", "真人": "not_stop_words", "四方": "not_stop_words", "晶体": "not_stop_words", "全世界": "not_stop_words", "大于": "not_stop_words", "金属": "not_stop_words", "特殊": "not_stop_words", "物质": "not_stop_words", "称为": "stop_words", "当时": "not_stop_words", "最大": "not_stop_words", "发现": "not_stop_words", "形成": "not_stop_words", "这种": "stop_words",} ,
            },
            ensure_ascii=False
        )

        system_prompt = f"""基于TEXT文本内容判断给出的part列表中的分词是否为停用词到judge_stop_words（字典类型），输出包含judge_stop_words字典的JSON。
                    示例：
                    TEXT："稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。"
                    Q：{example1_response}
                    A：{example11_response}
                    
                    TEXT："中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。"
                    Q：{example2_response}
                    A：{example22_response}
                    
                    TEXT："钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。"
                    Q：{example3_response}
                    A：{example33_response}"""
        user_prompt = f"""
                    {chunk_context}
                    {part_word_list}
                    """
        return self._request_json(system_prompt, user_prompt)

    # LLM: classify tokens as academic, idle, or stop words in one pass
    def llm_judge_academic_or_stop_words(self, chunk_context, part_word_list):
        """Classify each token as academic_words / idle_words / stop_words.

        Returns the model's JSON string containing a "judge" dict.
        """
        example1_response, example2_response, example3_response = self._part_examples()
        example11_response = json.dumps(
            {
                "judge": {'光学材料': 'academic_words', '17': 'idle_words', '磁学': 'academic_words', '磁性材料': 'academic_words', '金属元素': 'academic_words', '稀土元素': 'academic_words', '战略地位': 'academic_words', '具有': 'idle_words', '稀土': 'academic_words', '催化剂': 'academic_words', '广泛应用': 'idle_words', '一组': 'idle_words', '光学': 'academic_words', '产业链': 'academic_words', '独特': 'idle_words', '性能': 'idle_words', '竞争': 'idle_words', '科技': 'idle_words', '材料': 'idle_words', '领域': 'idle_words', '它们': 'idle_words', '包括': 'idle_words', '主要': 'idle_words',},
            },
            ensure_ascii=False
        )
        example22_response = json.dumps(
            {
                "judge": {'钕铁硼': 'academic_words', '90%': 'idle_words', '储氢': 'academic_words', '永磁': 'academic_words', '扮演着': 'stop_words', '稀土元素': 'academic_words', '磁体': 'academic_words', '燃料电池': 'academic_words', '电动汽车': 'academic_words', '发电机': 'academic_words', '稀土': 'academic_words', '氧化物': 'academic_words', '风力': 'academic_words', '合金': 'academic_words', '重要': 'idle_words', '固体': 'idle_words', '角色': 'idle_words', '环保': 'academic_words', '总量': 'idle_words', '新兴': 'idle_words', '产量': 'academic_words', '材料': 'academic_words', '领域': 'idle_words', '尤其': 'stop_words', '全球': 'idle_words', '此外': 'stop_words', '左右': 'stop_words', '作用': 'idle_words', '具有': 'idle_words', '中国': 'idle_words',},
            },
            ensure_ascii=False
        )
        example33_response = json.dumps(
            {
                "judge": {'磁铁': 'academic_words', '磁能积': 'academic_words', 'Neodymium': 'academic_words', 'magnet': 'idle_words', 'Nd': 'academic_words', 'Fe': 'academic_words', '钕铁硼': 'academic_words', 'NdFeB': 'academic_words', 'Iron': 'academic_words', 'Boron': 'academic_words', '1982': 'idle_words', '佐川': 'idle_words', 'BHmax': 'academic_words', '住友': 'idle_words', '化学式': 'academic_words', '晶系': 'academic_words', '真人': 'idle_words', '四方': 'academic_words', '晶体': 'academic_words', '全世界': 'idle_words', '大于': 'idle_words', '金属': 'academic_words', '特殊': 'idle_words', '物质': 'idle_words', '称为': 'stop_words', '当时': 'idle_words', '最大': 'idle_words', '发现': 'idle_words', '形成': 'idle_words', '这种': 'stop_words',} ,
            },
            ensure_ascii=False
        )

        system_prompt = f"""基于TEXT文本内容判断给出的part列表中的分词是否为学术词汇（专业词汇）或停用词到judge（字典类型），其中学术词汇（专业词汇）赋值为"academic_words"，停用词赋值为"stop_words",其他既不属于学术词汇（专业词汇）也不属于停用词的分词赋值为"idle_words"，输出包含judge字典的JSON。
                    示例：
                    TEXT："稀土材料是指一组具有独特光学和磁学性能的金属元素，主要包括17种稀土元素。它们在科技竞争和产业链中具有战略地位，广泛应用于磁性材料、催化剂、光学材料等领域。"
                    Q：{example1_response}
                    A：{example11_response}
                    
                    TEXT："中国的稀土永磁材料产量已占全球总量的90%左右，尤其是钕铁硼磁体在电动汽车和风力发电机中扮演着重要角色。此外，稀土元素在储氢合金和固体氧化物燃料电池等新兴环保领域也具有重要作用。"
                    Q：{example2_response}
                    A：{example22_response}
                    
                    TEXT："钕磁铁（Neodymium magnet）也称为钕铁硼磁铁（NdFeB magnet），化学式为Nd₂Fe₁₄B，是由钕（Neodymium, Nd）、铁（Iron, Fe）、硼（Boron, B）形成的四方晶系晶体。于1982年，住友特殊金属的佐川真人发现钕磁铁。这种磁铁的磁能积（BHmax）大于钐钴磁铁，是当时全世界磁能积最大的物质。"
                    Q：{example3_response}
                    A：{example33_response}"""
        user_prompt = f"""
                    {chunk_context}
                    {part_word_list}
                    """
        return self._request_json(system_prompt, user_prompt)


class MainJob:
    def __init__(self, api_key="", base_url=""):
        # 传入API
        self.api_key = api_key
        self.base_url = base_url
        
        
    def local_workflow_hanlp_for_words(self, file_path, chunk_sizz):
        """Two-pass local-LLM + HanLP vocabulary extraction over files.

        Pass 1: for each semantic chunk of each file, the local LLM seeds an
        academic-word dictionary (persisted to ``IDF_PATH``). Pass 2: HanLP
        tokenises and runs NER per chunk, NER hits are graded by the LLM, and
        the LLM classifies extracted candidates into academic / idle / stop
        words, updating the whitelist/blacklist/stop-word files. Finishes by
        logging a summary.

        Args:
            file_path: iterable of file paths to process.
            chunk_sizz: semantic chunk size passed to the splitter.
        """
        total_chunk_num = 0
        total_words_num = 0
        total_academic_ner = {}          # word -> [LLM score, NER label]
        total_timeout_chunk_num = 0      # pass-1 LLM timeouts
        total_timeout_judge_num = 0      # pass-2 LLM timeouts
        # BUGFIX: pre-initialise so the summary/persist section below does not
        # raise NameError when file_path is empty.
        the_list_dist = {}
        ini_tmp_dict_whitelist = {}
        ini_tmp_dict_blacklist = {}

        for n in file_path:
            LOGGER.info(f"读取文件！\n 文件地址：{n}")
            all_init_dict = []
            try:
                # all_text is the whole file as a single string
                all_text = TextTools().open_file(n)
                LOGGER.info("读取文件成功！")
            except Exception as e:
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                LOGGER.critical(f"现在处理的文件：{n}")
                LOGGER.critical(f"报错信息: \n{e}")
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                continue
            # all_chunk is the list of semantic chunks of all_text
            total_words_num = total_words_num + len(all_text)
            all_chunk = TextTools().text_to_chunk(chunk_sizz, all_text, overlap=0, processes=1)
            total_chunk_num = total_chunk_num + len(all_chunk)
            LOGGER.info("语义分割成功！")
            LOGGER.info(all_chunk) # debug
            LOGGER.info("遍历块-词典初始化…………………………………………………………………………………………………………")
            # Pass 1: per-chunk dictionary initialisation
            for chunk in all_chunk:
                LOGGER.info(f"现在处理块：\n")
                LOGGER.info(chunk) # debug
                # BUGFIX: academic_dict was unbound after a TimeoutError and the
                # code fell through to json.loads, which then raised NameError
                # inside the except handler below. Initialise and skip instead.
                academic_dict = None
                try:
                    academic_dict = LocalLlmUse().local_llm_first_to_dist(chunk)
                    LOGGER.info(academic_dict) # debug
                except TimeoutError:
                    total_timeout_chunk_num = total_timeout_chunk_num + 1
                    LOGGER.info("*********************")
                    LOGGER.info("####################！！处理超时！！####################")
                    LOGGER.info(chunk)
                    LOGGER.info("*********************")
                    continue  # nothing to parse for this chunk

                try:
                    academic_dict_list = json.loads(academic_dict)["academic"]
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    LOGGER.info("块内首次生成列表成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk} ")
                    LOGGER.critical(f"匹配公式：\n{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    continue

                # Persist the seed words and collect them for this file
                try:
                    TextTools().write_unique_data(IDF_PATH, academic_dict_list)
                    for i1 in academic_dict_list:
                        all_init_dict.append(i1)
                    LOGGER.info("块内首次写入词典初始化文件成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk} ")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')
            LOGGER.info("结束词典初始化…………………………………………………………………………………………………………………………………………")

            # Feed the collected words into HanLP's combine dictionary
            the_list_dist = {}
            for ii in all_init_dict:
                the_list_dist.update({ii: ''})
            tok.dict_combine.update(the_list_dist)             # tokeniser combine dictionary
            TextTools().write_unique_data(DICT_COMBINE_PATH, the_list_dist)

            LOGGER.info("遍历块-hanlp处理……………………………………………………………………………………………………………………………………")
            ini_tmp_dict_whitelist = {}
            ini_tmp_dict_blacklist = {}
            # Pass 2: HanLP tokenisation + NER + LLM judgement per chunk
            for chunk in all_chunk:
                LOGGER.info(f"现在处理块：\n{chunk}")
                # Tokenise, then pair each token with its part-of-speech tag
                tok_list = Hanlp2Keyword().hanlp_tok(chunk)
                pos_list = Hanlp2Keyword().hanlp_pos(tok_list)
                the_tok_pos = {}
                for i, v in enumerate(tok_list):
                    the_tok_pos.update({v: pos_list[i]})

                # Whitelist tokens already known as academic words
                tmp_dict_whitelist = {}
                for iii in the_tok_pos:
                    if iii in the_list_dist:
                        tmp_dict_whitelist.update({iii: the_tok_pos[iii]})

                ner.dict_whitelist.update(tmp_dict_whitelist)  # NER whitelist dictionary
                ini_tmp_dict_whitelist.update(tmp_dict_whitelist)
                # Named-entity recognition
                the_result = Hanlp2Keyword().hanlp_ner(tok_list)

                the_key_hanlp_list = []
                for i in the_result:
                    the_key_hanlp_list.append(i[0])
                    print(f"$$$$ 被处理词 i 为：{i} ")
                    if i[0] in all_init_dict:
                        try:
                            score = LocalLlmUse().local_llm_grade_dist(i[0])
                            dict_obj = json.loads(score)
                            print(f"$$$$分数为：{dict_obj[i[0]]} ，对应的词汇： {i}")
                            total_academic_ner.update({i[0]: [dict_obj[i[0]], i[1]]})
                        except Exception as e:
                            # BUGFIX: this handler previously logged a *success*
                            # message; report the grading failure instead.
                            LOGGER.critical(f"块内词汇打分遇到错误。报错信息: \n{e}")

                LOGGER.info(f"$$$$总体分数为：\n {total_academic_ner}")

                # Let the local LLM classify the extracted keywords
                # BUGFIX: judge_academic_or_stop could be unbound (first chunk)
                # or stale (later chunks) after a TimeoutError; initialise and
                # skip the chunk on timeout/failure.
                judge_academic_or_stop = None
                try:
                    judge_academic_or_stop = LocalLlmUse().local_llm_judge_academic_or_stop_words(chunk, the_key_hanlp_list)
                    LOGGER.info("块内识别关键词、闲置词和停用词成功！")
                    LOGGER.info("*********************")
                    LOGGER.info(judge_academic_or_stop)
                    LOGGER.info("*********************")
                except TimeoutError:
                    total_timeout_judge_num = total_timeout_judge_num + 1
                    LOGGER.info("*********************")
                    LOGGER.info("####################！！处理超时！！####################")
                    LOGGER.info(chunk)
                    LOGGER.info("*********************")
                    continue
                except Exception as e:
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk}")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.info("*********************")
                    LOGGER.info(judge_academic_or_stop)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                    continue
                try:
                    json_of_part = json.loads(judge_academic_or_stop)
                except Exception as e:
                    LOGGER.critical('#####################加载处理闲置词和停用词遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：{chunk}")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.info("*********************")
                    # BUGFIX: json_of_part is unbound in this handler (its own
                    # json.loads failed); log the raw LLM response instead.
                    LOGGER.info(judge_academic_or_stop)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################加载处理闲置词和停用词遇到错误##########################')
                    continue

                # BUGFIX: missing keys previously left academic_words /
                # idle_words / stop_words unbound and crashed on first use;
                # fall back to empty lists and keep the original error log.
                if not all(k in json_of_part for k in ("academic_words", "idle_words", "stop_words")):
                    LOGGER.error("关键词列表识别错误！！！")
                academic_words = json_of_part.get("academic_words", [])
                idle_words = json_of_part.get("idle_words", [])
                stop_words = json_of_part.get("stop_words", [])
                # Append newly recognised stop words to the stop-word file
                TextTools().write_unique_data(STOP_WORDS_PATH, stop_words)
                # Blacklist idle words for NER
                for i in idle_words:
                    ner.dict_blacklist.update({i: ''})
                    ini_tmp_dict_blacklist.update({i: ''})
                # Blacklist stop words for NER
                for i in stop_words:
                    ner.dict_blacklist.update({i: ''})
                    ini_tmp_dict_blacklist.update({i: ''})

            LOGGER.info("结束-hanlp处理………………………………………………………………………………………………………………………………")
            LOGGER.info("文件处理完毕！")
        # NOTE(review): only the last file's whitelist/blacklist is persisted
        # here, as in the original — confirm whether cross-file accumulation
        # was intended.
        TextTools().write_unique_data(DICT_WHITELIST_PATH, ini_tmp_dict_whitelist)
        TextTools().write_unique_data(DICT_BLACKLIST_PATH, ini_tmp_dict_blacklist)
        TextTools().write_unique_dict_data(IDF_PATH_GRADE_1, total_academic_ner)
        with open(IDF_PATH, 'r', encoding='utf-8') as file:  # BUGFIX: explicit encoding
            num_lines = sum(1 for line in file)

        subprocess.run(["cp", IDF_PATH, IDF_PATH_1])
        # BUGFIX: the old pattern ^[0-9]$ only deleted single-digit lines;
        # match any all-digit line, as the "去除纯数字" summary label intends.
        subprocess.run(["sed", "-i", r"/^[0-9]\+$/d", IDF_PATH_1])
        with open(IDF_PATH_1, 'r', encoding='utf-8') as file:
            num_lines_1 = sum(1 for line in file)
        LOGGER.info(f"""
                    总结………………………………………………………………………………………………………………………………
                    总文件数：                  {len(file_path)}
                    总文本块：                  {total_chunk_num}
                    总字符数：                  {total_words_num}
                    学术词汇初始化数：           {len(the_list_dist)}
                    hanlp处理后的学术词汇数：    {num_lines}
                    去除纯数字后词汇数：         {num_lines_1}
                    超时的文本块（初始）：       {total_timeout_chunk_num}
                    超时的文本块（分词）：       {total_timeout_judge_num}
                    """)
            
                
    def local_workflow_for_words(self, file_path, chunk_sizz, proportion_keywords):
        for n in file_path:
            LOGGER.info(f"读取文件！\n 文件地址：{n}")
            try:
                # all_text 是读取文件后的字符串
                all_text = TextTools().open_file(n)
                # all_text = jieba_to_keyword().clear_context(all_text) # 清除所有特殊字符
                LOGGER.info("读取文件成功！")
            except Exception as e:
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                LOGGER.critical(f"现在处理的文件：{n}")
                LOGGER.critical(f"报错信息: \n{e}")
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                continue
            # all_chunk 是对 all_text 语义分割后的块列表
            all_chunk = TextTools().text_to_chunk(chunk_sizz,all_text, overlap=0, processes=1)
            LOGGER.info("语义分割成功！")
            LOGGER.info(all_chunk) # debug
            LOGGER.info("遍历块-词典初始化…………………………………………………………………………………………………………")
            # 遍历块-词典初始化
            for chunk in all_chunk:
                # 生成块内学术词汇列表
                LOGGER.info(f"现在处理块：\n")
                LOGGER.info(chunk) # debug
                the_list_dist = []
                try:
                    academic_dict = LocalLlmUse().local_llm_first_to_dist(chunk)
                    LOGGER.info(academic_dict) # debug
                except TimeoutError:
                    LOGGER.info("*********************")
                    LOGGER.info("####################！！处理超时！！####################")
                    LOGGER.info(chunk)
                    LOGGER.info("*********************")
                
                
                try:
                    academic_dict_json = json.loads(academic_dict)["academic"]
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    for ii in academic_dict_json:
                        the_list_dist.append(ii)
                    LOGGER.info("块内首次生成词典成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk} ")
                    LOGGER.critical(f"匹配公式：\n{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    continue
                # 将 the_list_dist 写入 jieba 逆向词频率文件
                try:
                    TextTools().write_unique_data(IDF_PATH, the_list_dist)
                    LOGGER.info("块内首次写入词典成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk} ")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')        
            LOGGER.info("结束词典初始化…………………………………………………………………………………………………………………………………………")
            
            LOGGER.info("遍历块-jieba处理……………………………………………………………………………………………………………………………………")
            # 遍历块-jieba处理
            for chunk in all_chunk:          
                # 生成 jieba 关键词提取列表
                LOGGER.info(f"现在处理块：\n{chunk[0:20]} .....")
                total_word_numb = len(chunk)
                
                get_cnt=math.ceil(total_word_numb * proportion_keywords)  #向上取整，chunk字数的一定比例为关键词提取数
                the_key_jieba_list = []
                for i in Jieba2Keyword().tfide_jieba_str_to_keyword(chunk,get_cnt):
                    the_key_jieba_list.append(i)
                # 利用llm_use识别提取的关键词
                try:
                    judge_academic_or_stop = LocalLlmUse().local_llm_judge_academic_or_stop_words(chunk,the_key_jieba_list)
                    LOGGER.info("块内识别关键词、闲置词和停用词成功！")
                    LOGGER.info("*********************")
                    LOGGER.info(judge_academic_or_stop)
                    LOGGER.info("*********************")
                except TimeoutError:
                    LOGGER.info("*********************")
                    LOGGER.info("####################！！处理超时！！####################")
                    LOGGER.info(chunk)
                    LOGGER.info("*********************")
                except Exception as e:
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk}")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.info("*********************")
                    LOGGER.info(judge_academic_or_stop)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                try:
                    json_of_part = json.loads(judge_academic_or_stop)
                    
                except Exception as e:
                    LOGGER.critical('#####################加载处理闲置词和停用词遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：{chunk}")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.info("*********************")
                    LOGGER.info(json_of_part)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################加载处理闲置词和停用词遇到错误##########################')
                    continue
                
                academic_words = []
                idle_words = []
                stop_words= []
                try:
                    for key1 in json_of_part["academic_words"]:
                        academic_words.append(key1)
                    for key2 in json_of_part["idle_words"]:
                        idle_words.append(key2)
                    for key3 in json_of_part["stop_words"]:
                        stop_words.append(key3)
                except Exception as e:
                    LOGGER.error("关键词列表识别错误！！！")
                    LOGGER.critical(f"报错信息: \n{e}")
                # 先将识别的关键词添加进入词典文件，将识别的停用词添加进入停用词文件，将闲置词从词典文件中删除。
                TextTools().write_unique_data(IDF_PATH, academic_words)
                TextTools().write_unique_data(STOP_WORDS_PATH, stop_words)
                TextTools().delete_idle_stop_words(IDF_PATH, idle_words)
            LOGGER.info("结束-jieba处理………………………………………………………………………………………………………………………………")    
            LOGGER.info("文件处理完毕！")
    def local_workflow_only_words(self, file_path, chunk_sizz):
        """Seed the IDF dictionary from files using only the local LLM.

        For each file in *file_path*: read it, split it into semantic chunks of
        roughly *chunk_sizz* characters, ask the local LLM for the chunk's
        academic vocabulary and append the unique terms to the IDF dictionary
        file (``IDF_PATH``). Errors on a single file or chunk are logged and
        that item is skipped; processing continues with the next one.

        Args:
            file_path: iterable of paths of the input files to process.
            chunk_sizz: target chunk size passed to the semantic splitter.
        """
        for n in file_path:
            LOGGER.info(f"读取文件！\n 文件地址：{n}")
            try:
                # all_text is the whole file content as a single string
                all_text = TextTools().open_file(n)
                LOGGER.info("读取文件成功！")
            except Exception as e:
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                LOGGER.critical(f"现在处理的文件：{n}")
                LOGGER.critical(f"报错信息: \n{e}")
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                continue
            # all_chunk is the list of semantic chunks split out of all_text
            all_chunk = TextTools().text_to_chunk(chunk_sizz, all_text, overlap=0, processes=1)
            LOGGER.info("语义分割成功！")
            LOGGER.info(all_chunk) # debug
            LOGGER.info("遍历块-词典初始化…………………………………………………………………………………………………………")
            # Iterate over chunks: dictionary initialisation
            for chunk in all_chunk:
                # Ask the local LLM for the academic vocabulary of this chunk
                LOGGER.info(f"现在处理块：\n")
                LOGGER.info(chunk) # debug
                the_list_dist = []
                try:
                    academic_dict = LocalLlmUse().local_llm_first_to_dist(chunk)
                    LOGGER.info(academic_dict) # debug
                except TimeoutError:
                    LOGGER.info("*********************")
                    LOGGER.info("####################！！处理超时！！####################")
                    LOGGER.info(chunk)
                    LOGGER.info("*********************")
                    # BUGFIX: skip this chunk on timeout. Previously execution
                    # fell through to json.loads(academic_dict), which either
                    # re-parsed the STALE result of the previous chunk
                    # (silently duplicating its words) or raised NameError on
                    # the very first chunk.
                    continue
                try:
                    academic_dict_json = json.loads(academic_dict)["academic"]
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    for ii in academic_dict_json:
                        the_list_dist.append(ii)
                    LOGGER.info("块内首次生成词典成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk} ")
                    LOGGER.critical(f"匹配公式：\n{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    continue
                # Write the_list_dist to the jieba inverse-document-frequency file
                try:
                    TextTools().write_unique_data(IDF_PATH, the_list_dist)
                    LOGGER.info("块内首次写入词典成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical(f"现在处理块：\n {chunk} ")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')
            LOGGER.info("文件处理完毕！")
    def workflow_for_words(self, file_path, chunk_sizz, proportion_keywords):
        """Build the IDF dictionary and stop-word list using the remote LLM API.

        Two passes per file. Pass 1: split the file into semantic chunks and
        ask the remote LLM (``LlmUse(self.api_key, self.base_url)``) for each
        chunk's academic vocabulary to seed the IDF dictionary. Pass 2: extract
        jieba keywords from each chunk (``proportion_keywords`` of the chunk's
        character count, rounded up) and let the LLM classify them into
        academic words (added to the dictionary), stop words (added to the
        stop-word file) and idle words (deleted from the dictionary).
        Errors on a single file or chunk are logged and that item is skipped.

        Args:
            file_path: iterable of paths of the input files to process.
            chunk_sizz: target chunk size passed to the semantic splitter.
            proportion_keywords: fraction of the chunk's length used as the
                number of keywords to extract.
        """
        for n in file_path:
            LOGGER.info(f"读取文件！\n 文件地址：{n}")
            try:
                # all_text is the whole file content as a single string
                all_text = TextTools().open_file(n)
                LOGGER.info("读取文件成功！")
            except Exception as e:
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                LOGGER.critical(f"现在处理的文件：{n}")
                LOGGER.critical(f"报错信息: \n{e}")
                LOGGER.critical('#####################读取文件可能遇到错误##########################')
                continue
            # all_chunk is the list of semantic chunks split out of all_text
            all_chunk = TextTools().text_to_chunk(chunk_sizz, all_text, overlap=0, processes=1)
            LOGGER.info("语义分割成功！")
            LOGGER.info(all_chunk) # debug
            LOGGER.info("遍历块-词典初始化…………………………………………………………………………………………………………")
            # Pass 1: dictionary initialisation
            for chunk in all_chunk:
                # Ask the remote LLM for the academic vocabulary of this chunk
                LOGGER.info(f"现在处理块：\n{chunk[0:20]} .....")
                LOGGER.info(chunk) # debug
                the_list_dist = []
                # BUGFIX: this API call was previously unprotected, so any
                # network/API exception aborted the whole multi-file run.
                # Guard it and skip the chunk, matching the sibling
                # local_workflow_* methods' error-handling style.
                try:
                    academic_dict = LlmUse(self.api_key, self.base_url).llm_first_to_dist(chunk)
                    LOGGER.info(academic_dict) # debug
                except Exception as e:
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    LOGGER.critical(f"现在处理块：{chunk[0:20]} .....{chunk[-20:]}")
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    continue
                try:
                    academic_dict_json = json.loads(academic_dict)["academic"]
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    for ii in academic_dict_json.keys():
                        the_list_dist.append(ii)
                    LOGGER.info("块内首次生成词典成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    LOGGER.critical(f"现在处理块：{chunk[0:20]} .....{chunk[-20:]}")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.info("*********************")
                    LOGGER.info(academic_dict)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################块内首次生成词典遇到错误##########################')
                    continue
                # Write the_list_dist to the jieba inverse-document-frequency file
                try:
                    TextTools().write_unique_data(IDF_PATH, the_list_dist)
                    LOGGER.info("块内首次写入词典成功")
                except Exception as e:
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')
                    LOGGER.critical(f"现在处理块：{chunk[0:10]} .....{chunk[-10:]}")
                    LOGGER.critical(f"匹配公式：{chunk[0:10]}.*{chunk[-10:]}")
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical('#####################块内首次写入词典遇到错误##########################')
            LOGGER.info("结束词典初始化…………………………………………………………………………………………………………………………………………")

            LOGGER.info("遍历块-jieba处理……………………………………………………………………………………………………………………………………")
            # Pass 2: jieba keyword extraction + LLM classification
            for chunk in all_chunk:
                LOGGER.info(f"现在处理块：\n{chunk[0:20]} .....")
                total_word_numb = len(chunk)
                # Ceil so that at least one keyword is requested per chunk
                get_cnt = math.ceil(total_word_numb * proportion_keywords)
                the_key_jieba_list = []
                for i in Jieba2Keyword().tfide_jieba_str_to_keyword(chunk, get_cnt):
                    the_key_jieba_list.append(i)
                # BUGFIX: guard the classification API call as well (see above)
                try:
                    judge_academic_or_stop = LlmUse(self.api_key, self.base_url).llm_judge_academic_or_stop_words(chunk, the_key_jieba_list)
                except Exception as e:
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                    LOGGER.critical(f"现在处理块：{chunk[0:20]} .....{chunk[-20:]}")
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                    continue
                try:
                    json_of_part = json.loads(judge_academic_or_stop)["judge"]
                    LOGGER.info("块内识别关键词、闲置词和停用词成功！")
                    LOGGER.info("*********************")
                    LOGGER.info(judge_academic_or_stop)
                    LOGGER.info("*********************")
                except Exception as e:
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                    LOGGER.critical(f"现在处理块：{chunk[0:20]} .....{chunk[-20:]}")
                    LOGGER.critical(f"匹配公式：{chunk[0:20]}.*{chunk[-20:]}")
                    LOGGER.critical(f"报错信息: \n{e}")
                    LOGGER.info("*********************")
                    LOGGER.info(judge_academic_or_stop)
                    LOGGER.info("*********************")
                    LOGGER.critical('#####################块内识别关键词、闲置词和停用词遇到错误##########################')
                    continue

                academic_words = []
                idle_words = []
                stop_words = []
                # json_of_part maps each candidate word to its class label
                for key, value in json_of_part.items():
                    if value == "academic_words":
                        academic_words.append(key)
                    elif value == "idle_words":
                        idle_words.append(key)
                    elif value == "stop_words":
                        stop_words.append(key)
                    else:
                        LOGGER.error("关键词列表识别错误！！！")
                # Add academic words to the dictionary, stop words to the
                # stop-word file, and delete idle words from the dictionary.
                TextTools().write_unique_data(IDF_PATH, academic_words)
                TextTools().write_unique_data(STOP_WORDS_PATH, stop_words)
                TextTools().delete_idle_stop_words(IDF_PATH, idle_words)
            LOGGER.info("结束-jieba处理………………………………………………………………………………………………………………………………")
            LOGGER.info("文件处理完毕！")

        
      

   



if __name__ == "__main__":
    file_path = TextTools().read_file_list(FILE_PATH_GET)

    chunk_sizz = int(CONF['main']['chunk_sizz'])   #分块越大越容易报错，但分块越小处理越慢，建议为1024或2048。
    proportion_keywords = float(CONF['main']['proportion_keywords']) # 越大效果越差，但越小越容易遗漏专业词汇
    # MainJob(API_KEY, BASE_URL).workflow_for_words(file_path, chunk_sizz, proportion_keywords)
    # MainJob().local_workflow_for_words(file_path, chunk_sizz, proportion_keywords)
    MainJob().local_workflow_hanlp_for_words(file_path, chunk_sizz)
