from home.models import Jobposting
import re


def load_data():
    """
    Load all job postings from the database and shape them for retrieval.

    Returns:
        list[dict]: one dict per posting with keys:
            "full_text"   - a single formatted string combining the main fields,
            "title", "money", "working", "description" - raw model fields.

    Note:
        The original implementation rebound ``jobposting_list`` to ``[]``
        right after fetching the queryset, so the loop iterated over an
        empty list and the function always returned ``[]``. The queryset
        and the result list now use distinct names.
    """
    results = []
    for jobposting in Jobposting.objects.all():
        # Keep the exact formatted text used downstream (field labels in Chinese).
        full_text = f"""
        岗位名称:{jobposting.title},职位描述:{jobposting.description},职位城市:{jobposting.city},职位学历:{jobposting.education},职位类型:{jobposting.type},职位经验:{jobposting.working},职位薪资:{jobposting.money}
        """
        results.append({
            "full_text": full_text,
            "title": jobposting.title,
            "money": jobposting.money,
            "working": jobposting.working,
            "description": jobposting.description,
        })
    return results

#
# def text_split(text):
#     """
#     使用spaCy进行语义切分
#
#     Args:
#         text (str): 需要切分的文本
#
#     Returns:
#         list: 切分后的文本块列表
#     """
#     # 尝试导入spaCy
#     try:
#         import spacy
#
#         # 尝试加载中文模型
#         try:
#             nlp = spacy.load("zh_core_web_sm")
#         except OSError:
#             # 如果没有中文模型，尝试加载英文模型
#             try:
#                 nlp = spacy.load("en_core_web_sm")
#             except OSError:
#                 # 如果都没有，回退到简单句子切分
#                 return simple_sentence_split(text)
#
#         # 设置最大长度以避免警告
#         nlp.max_length = len(text) + 1000
#
#         # 处理文本
#         doc = nlp(text)
#
#         # 按句子分割
#         sentences = [sent.text.strip() for sent in doc.sents if sent.text.strip()]
#
#         # 将句子组合成适当大小的块 (默认块大小为500字符)
#         chunk_size = 500
#         chunks = []
#         current_chunk = []
#         current_length = 0
#
#         for sentence in sentences:
#             sentence_length = len(sentence)
#
#             # 如果当前块加上新句子超过chunk_size，或者句子本身很长
#             if current_length + sentence_length > chunk_size and current_chunk:
#                 # 保存当前块
#                 chunks.append(' '.join(current_chunk))
#                 # 开始新块
#                 current_chunk = [sentence]
#                 current_length = sentence_length
#             else:
#                 # 添加句子到当前块
#                 current_chunk.append(sentence)
#                 current_length += sentence_length
#
#         # 添加最后一个块
#         if current_chunk:
#             chunks.append(' '.join(current_chunk))
#
#         # 过滤掉空块
#         chunks = [chunk for chunk in chunks if chunk.strip()]
#
#         return chunks
#
#     except ImportError:
#         # spaCy不可用，回退到简单分割
#         return simple_sentence_split(text)
#     except Exception as e:
#         # 其他错误，回退到简单分割
#         print(f"spaCy语义切分出错: {e}")
#         return simple_sentence_split(text)
#
#
# def simple_sentence_split(text, chunk_size=500):
#     """
#     简单的句子分割实现（备用方案）
#
#     Args:
#         text (str): 需要切分的文本
#         chunk_size (int): 块大小
#
#     Returns:
#         list: 切分后的文本块列表
#     """
#     # 使用正则表达式按句子分割
#     sentences = re.split(r'[。！？.!?]', text)
#     sentences = [s.strip() for s in sentences if s.strip()]
#
#     # 组合句子成块
#     chunks = []
#     current_chunk = []
#     current_length = 0
#
#     for sentence in sentences:
#         # 添加适当的句号（如果是中文句子）
#         if sentence and sentence[-1] not in '。！？.!?':
#             if any('\u4e00' <= char <= '\u9fff' for char in sentence):
#                 sentence += '。'
#             else:
#                 sentence += '.'
#
#         sentence_length = len(sentence)
#
#         if current_length + sentence_length > chunk_size and current_chunk:
#             chunks.append(' '.join(current_chunk))
#             current_chunk = [sentence]
#             current_length = sentence_length
#         else:
#             current_chunk.append(sentence)
#             current_length += sentence_length
#
#     if current_chunk:
#         chunks.append(' '.join(current_chunk))
#
#     # 过滤掉空块
#     chunks = [chunk for chunk in chunks if chunk.strip()]
#
#     return chunks


if __name__ == '__main__':
    # Sample text for manually exercising the (currently disabled) text_split
    # helper. Renamed from ``str`` to avoid shadowing the builtin.
    sample_text = """
    通义千问是阿里云研发的大语言模型，支持多模态理解与生成。其核心能力包括文本生成、图像分析、语义分割等。
在实际应用中，通义千问可用于智能客服、内容创作、数据分析等场景。例如，在客服场景中，它能快速识别用户问题意图，
并生成准确的回答；在内容创作场景中，它可根据用户指令生成文章、报告等内容。
"""

    # chunks = text_split(sample_text)
    # print(chunks)

    load_data()