# coding=utf-8
import time

t = time.time()
import json
import logging
from datetime import datetime
from bid.generate.tools.dawate import main_stream
import threading
from copy import deepcopy
import math

current_time = datetime.now()

import os

# Log setup: one log file per run, named with the process start timestamp.
ptah = "./out_log/"  # NOTE(review): 'ptah' is a typo of 'path'; name kept in case other modules reference it
# Fix: exists()+mkdir was a TOCTOU race and crashed if the directory appeared
# between the check and the call; makedirs(exist_ok=True) is atomic enough.
os.makedirs(ptah, exist_ok=True)

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s',
                    filename='./out_log/' + current_time.strftime("%Y-%m-%d_%H-%M-%S") + 'dawate_log.txt',
                    filemode='a')  # append mode

import pymysql
from datetime import datetime


def db_connet():
    """Open a connection to the local `zentao` MySQL database and return it.

    Returns:
        pymysql.connections.Connection: an open connection object.
    """
    # NOTE(review): credentials are hard-coded; move them to config/env
    # variables before production use.
    conn_params = dict(
        host='127.0.0.1',   # server address, default localhost
        user='root',        # user name
        passwd='123456',    # password
        port=3306,          # port, MySQL default
        db='zentao',        # database name
        charset='utf8',     # character encoding
    )
    conn = pymysql.connect(**conn_params)
    print("----数据库连接成功------", conn)
    logging.info("----数据库连接成功------ %s", conn)
    return conn


from docx import Document


def extract_table_data(tables, table_title):
    """
    Flatten docx tables into rows of cell text, prefixed with the table title.

    Args:
        tables: list of docx table objects
        table_title: title string of the table(s)
    Returns:
        (table_data, table_data_temp): table_data is [title, row, row, ...];
        table_data_temp holds the same rows without the title element.
    """
    rows_only = []
    titled = [table_title]  # title goes first in the result

    for tbl in tables:
        for row in tbl.rows:
            collected = ['']  # leading sentinel, stripped again below
            cells = row.cells
            for idx, cell in enumerate(cells):
                if cell in cells[:idx]:
                    # Merged cells repeat the same object; blank out repeats.
                    text_value = ''
                elif cell.tables:
                    # Nested table: recurse, then flatten each nested row to a
                    # comma-joined string, rows separated by semicolons.
                    _, nested_rows = extract_table_data(cell.tables, table_title)
                    text_value = '; '.join(', '.join(r) for r in nested_rows)
                else:
                    text_value = cell.text.strip()
                collected.append(text_value)

            # Drop the sentinel; skip fully empty rows.
            row_values = collected[1:] if len(collected) > 1 else []
            if not row_values:
                continue

            # '%%' marks content that must be expanded into several cells.
            for idx, entry in enumerate(row_values):
                if '%%' in entry:
                    pieces = entry.split('%%')
                    row_values[idx:idx + len(pieces)] = pieces

            titled.append(row_values)
            rows_only.append(row_values)

    return titled, rows_only


def splittextintosegments_element(docxpath):
    """Collect every table in a Word document as a stringified extraction,
    titled with the nearest preceding non-empty paragraph.

    :param docxpath: path to the .docx file
    :return: list of stringified table extractions; empty list if the document
        cannot be opened
    """
    try:
        doc = Document(docxpath)
    except Exception as e:
        print(f"无法打开文档：{e}")
        return []

    segments = []
    # Fix: table_title was previously unbound (NameError) when a table
    # appeared before any non-empty paragraph.
    table_title = ''

    # Walk the document body in order so each table sees its preceding title.
    for element in doc.element.body:
        if element.tag.endswith('p'):  # paragraph
            text = element.text.strip().replace(" ", "").replace("\n", "").replace("\t", "")
            if text.strip() != "":
                table_title = text.strip()
        elif element.tag.endswith('tbl'):  # table
            # Map the body element back to its index in Document.tables.
            list_d = [i for i in doc.element.body.inner_content_elements if "<w:tbl>" in str(i)]
            table = doc.tables[list_d.index(element)]
            # NOTE(review): extract_table_data returns a (data, temp) tuple and
            # the whole tuple is stringified here — confirm this is intended
            # (split_text_into_segments unpacks it instead).
            table_data = extract_table_data([table], table_title)
            segments.append(str(table_data))

    return segments


def splittextintosegments(docxpath):
    """Split a Word document's paragraphs into ~20-paragraph segments,
    extending each segment until it ends with a full stop ('。').

    :param docxpath: path to the .docx file
    :return: list of segment strings; empty list if the document cannot be opened
    """
    try:
        doc = Document(docxpath)
    except Exception as e:
        print(f"无法打开文档：{e}")
        return []

    paragraphs = doc.paragraphs
    total = len(paragraphs)
    segments = []
    idx = 0
    # Fix: the original advanced a for-loop index manually inside the loop,
    # which did not advance the enumerate iterator, so every "extension"
    # paragraph was re-emitted at the start of the following segment.
    while idx < total:
        tempsegment = []
        # Take paragraphs up to the next multiple-of-20 boundary (absolute index).
        while idx < total:
            tempsegment.append(paragraphs[idx].text)
            idx += 1
            if idx % 20 == 0:
                break
        # Extend the segment until it ends with '。' or the document ends.
        while idx < total and not tempsegment[-1].strip().endswith('。'):
            tempsegment.append(paragraphs[idx].text)
            idx += 1
        segments.append('\n'.join(tempsegment))
    return segments


def split_text_into_segments_element(text_list, max_length=4000):
    """
    Greedily merge consecutive strings into segments of at most `max_length`
    characters, combining at most three inputs per segment. A string longer
    than `max_length` becomes a segment on its own.

    :param text_list: list of text strings
    :param max_length: maximum length of each merged segment
    :return: list of merged segments
    """
    segments = []
    idx = 0
    total = len(text_list)
    while idx < total:
        first = text_list[idx]
        # Oversized string, or nothing left to merge with: emit as-is.
        if len(first) > max_length or idx + 1 >= total:
            segments.append(first)
            idx += 1
            continue
        second = text_list[idx + 1]
        # The pair would overflow: emit the first string alone.
        if len(first) + len(second) > max_length:
            segments.append(first)
            idx += 1
            continue
        # Fold in a third string when it still fits; otherwise keep the pair.
        if idx + 2 < total and len(first) + len(second) + len(text_list[idx + 2]) <= max_length:
            segments.append(first + second + text_list[idx + 2])
            idx += 3
        else:
            segments.append(first + second)
            idx += 2
    return segments


import re
import json_repair


def json_process(data_answ):
    """Return the body of the last ```json ...``` fenced block found in the
    answer text, or '' when no fenced block is present."""
    fenced = re.findall(r'```json(.*?)```', data_answ, re.DOTALL)
    return fenced[-1] if fenced else ''


def json_process_to(data):
    """Join the bodies of every ```json{...}``` fence (braces stripped),
    looking only at the text after the last </think> marker."""
    tail = data.split('</think>')[-1]
    bodies = re.findall(r'```json{(.*?)}```', tail, re.DOTALL)
    return '\n'.join(bodies)


def slice_text(text, slice_length):
    """Cut `text` into slices of roughly at most `slice_length` characters,
    prefixing every slice after the first with the title (the text before the
    first full-width colon '：').

    :param text: text to slice
    :param slice_length: target maximum slice length
    :return: list of slices, title-prefixed from the second slice on
    """
    slices = []
    slices_new = []
    current_slice = ""
    current_length = 0
    title = ''
    # Fix: the membership test used the ASCII colon ':' while the split used
    # the full-width '：', so a text containing only an ASCII colon got the
    # WHOLE text as its title. Test and split now use the same character.
    if '：' in text:
        title = text.split('：')[0]
    for char in text:
        current_slice += char
        current_length += 1

        # Once the slice reaches the target length, look for a break point.
        if current_length >= slice_length:
            # Search the reversed slice for '》' or a non-punctuation character.
            # NOTE(review): the negated class [^，。！？；：] matches almost any
            # character, so the break is usually at the slice end — confirm the
            # intent was not [，。！？；：] (break at punctuation).
            split_point = re.search(r'》|[^，。！？；：]', current_slice[::-1])
            if split_point:
                # Translate the reversed match position back to a real index.
                split_index = len(current_slice) - split_point.start()
                slices.append(current_slice[:split_index])
                # Carry the remainder into the next slice.
                current_slice = current_slice[split_index:]
                current_length = len(current_slice)

    # Keep the trailing remainder, if any.
    if current_slice:
        slices.append(current_slice)
    for i, piece in enumerate(slices):
        if i == 0:
            print(f"切片{i + 1}：长度为：{len(piece)}{piece}\n")
            slices_new.append(piece)
        else:
            print(f"切片{i + 1}：长度为：{len(piece)}{title + piece}\n")
            slices_new.append(title + piece)
    return slices_new


def split_list_into_chunks(lst, chunk_size=5000):
    """
    Splits a list of strings into chunks of at most `chunk_size` characters,
    without splitting individual strings; strings longer than `chunk_size`
    are pre-sliced with `slice_text`.

    :param lst: List of strings to be split.
    :param chunk_size: Maximum size of each chunk.
    :return: List of chunk strings.
    """
    chunks = []
    current_chunk = []

    for item in lst:
        if len(item) > chunk_size:
            # Over-long item: flush the pending chunk, then emit its slices.
            # Fix: the original appended the slices to current_chunk and then
            # ALSO re-appended the raw over-long item, duplicating its content.
            print('长度超长了：', len(item))
            if current_chunk:
                chunks.append(''.join(current_chunk))
                current_chunk = []
            chunks.extend(slice_text(item, chunk_size))
            continue
        # Start a new chunk when adding this item would overflow.
        if sum(len(x) for x in current_chunk) + len(item) > chunk_size:
            chunks.append(''.join(current_chunk))
            current_chunk = [item]
        else:
            current_chunk.append(item)

    # Add the last chunk if it's not empty.
    if current_chunk:
        chunks.append(''.join(current_chunk))

    return chunks


def save_txt(filename, data):
    '''
    Write each element of `data` to `filename`, one stringified item per line.

    :param filename: output file path
    :param data: iterable of items; each is written as str(item) + newline
    :return: None
    '''
    # Fix: the file handle was never closed; use a context manager.
    with open(filename, 'w', encoding='utf-8') as file:
        for item in data:
            file.write(str(item) + '\n')


def text_process(dataansw1):
    """Remove every page marker of the form '第N页' from the text.

    Fix: the original rebuilt the result from the UNMODIFIED input on every
    loop iteration, so with several distinct page numbers only the last one
    was actually removed.

    :param dataansw1: input text
    :return: text with all page markers stripped
    """
    return re.sub(r"第\d+页", "", dataansw1)


def split_text_into_segments(file_path, max_length=10000):
    '''
    Read a Word document body in order, grouping paragraph text into segments
    of at most `max_length` characters; each table is emitted as its own
    stringified segment and its rows are also collected per preceding title.

    :param file_path: path to the .docx file
    :param max_length: maximum character length of a paragraph segment
    :return: (segments, table_dict) where table_dict maps the paragraph title
        preceding each table to a list of that title's extracted table rows
    '''
    table_dict = {}
    segments = []
    current_segment = ""
    current_length = 0
    # Fix: table_title was previously unbound (NameError) when a table
    # appeared before any non-empty paragraph.
    table_title = ''
    doc = Document(file_path)
    for element in doc.element.body:
        if element.tag.endswith('p'):  # paragraph
            text = element.text.strip()
            text = text_process(text)
            if text.strip() != "":
                # Remember the latest non-empty paragraph as the next table's title.
                table_title = text.strip()
                if current_length + len(text) > max_length:
                    # Segment full: flush it and start a new one.
                    segments.append(current_segment)
                    current_segment = text
                    current_length = len(text)
                else:
                    current_segment += text
                    current_length += len(text)
        elif element.tag.endswith('tbl'):  # table
            # Flush any pending paragraph text before emitting the table.
            if current_segment:
                segments.append(current_segment)
                current_segment = ""
                current_length = 0

            # Map the body element back to its index in Document.tables.
            list_d = [i for i in doc.element.body.inner_content_elements if "<w:tbl>" in str(i)]
            table = doc.tables[list_d.index(element)]

            table_data, table_data_temp = extract_table_data([table], table_title)

            # Several tables may share one title; collect them all.
            table_dict.setdefault(table_title, []).append(table_data_temp)
            segments.append(str(table_data))

    # Add the final pending segment, if any.
    if current_segment:
        segments.append(current_segment)

    return segments, table_dict


def write_file(keys, data_answ):
    '''
    Persist an answer under ./out_json/, in a file named after `keys` plus a
    timestamp.

    :param keys: identifier used in the file name (newlines/spaces stripped)
    :param data_answ: string content to write
    :return: None
    '''
    current_time = datetime.now()
    f_n = current_time.strftime('%Y%m%d_%H%M%S')
    key_ = str(keys).replace("\n", '').replace(' ', '')
    f = f'./out_json/{key_}{f_n}.txt'
    # Fix: os.mkdirs does not exist (AttributeError on first run); use
    # os.makedirs with exist_ok, which also avoids the check-then-create race.
    os.makedirs('./out_json', exist_ok=True)
    # Fix: the file handle was never closed; use a context manager.
    with open(f, 'w', encoding='utf-8') as file:
        file.writelines(data_answ)


def split_list_by_pattern(lst, pattern):
    """Split a list of strings into groups, starting a new group at every
    string containing `pattern`; the text before the pattern closes the
    previous group, the pattern and what follows opens the next one."""
    result = []
    current = []
    for entry in lst:
        if pattern not in entry:
            current.append(entry)
            continue
        cut = entry.index(pattern)
        # Close the running group with the prefix, reopen with the suffix.
        current.append(entry[:cut])
        result.append(current)
        current = [entry[cut:]]
    if current:
        result.append(current)
    return result


def json_process_split(data_answ):
    """Find every ```json ...``` fenced block in the answer and re-chunk the
    bodies via split_list_into_chunks; returns [] when no fence is present."""
    bodies = re.findall(r'```json(.*?)```', data_answ, re.DOTALL)
    if not bodies:
        return []
    return split_list_into_chunks(bodies)


# Global accumulator shared by the worker threads; dawate_streaming() files
# each LLM answer into one of these buckets keyed by the request "type".
# global_thread = {'基础信息': [], '其他额外信息':[],'其他额外信息合并':[],'详细评审标准': [], '其他':[]}
global_thread = {'其他额外信息': [], '详细评审标准': [], '其他': []}

# Cap on the number of concurrent LLM request threads.
pool_sema = threading.BoundedSemaphore(10)  # or use a plain Semaphore


# LLM ("大瓦特") streaming request worker.
def dawate_streaming(content, extra=None):
    """Send `content` to the LLM as a streaming request and file the answer
    into the matching bucket of the module-level ``global_thread`` dict.

    :param content: one prompt string, or a list of strings (each becomes a
        separate user message)
    :param extra: optional dict; ``extra['type']`` selects the destination
        bucket; type '详细评审标准完善' additionally reads ``extra['bid_name']``
    """
    # Fix: `extra={}` was a mutable default argument.
    if extra is None:
        extra = {}

    # Throttle concurrent LLM calls with the module-level semaphore.
    with pool_sema:

        messages = []
        if isinstance(content, str):
            messages.append({"role": "user", "content": content})
        elif isinstance(content, list):
            for part in content:
                messages.append({"role": "user", "content": part})

        # NOTE(review): credentials are hard-coded; move to config/env.
        config_params = {
            'relAppId': '17987690',
            'appId': '17987868',
            'appSecret': '5570ea83260dbcad482a7c9175411d5e',
            'messages': messages
        }

        # Drain the stream, echoing chunks as they arrive.
        answer = ''
        for chunk in main_stream(config_params):
            answer += chunk
            print(chunk, end='')

        # Keep only the text after the model's </think> reasoning marker.
        answer = answer.split('</think>')[-1]

        _type = extra.get('type') or ''

        # Route the answer into global_thread. (Fix: a second, unreachable
        # `elif _type == '详细评审标准'` branch was removed.)
        if _type == '详细评审标准':
            global_thread['详细评审标准'].append(answer)
        elif _type == '基础信息':
            global_thread['基础信息'] = answer
        elif _type == '其他额外信息':
            global_thread['其他额外信息'].append(answer)
        elif _type == '详细评审标准完善':
            global_thread['详细评审标准'][extra['bid_name']]['适用标包列表'] = answer.replace('，', ',')
        else:
            # Untyped answers (e.g. the table-extraction queries) pile up here.
            global_thread['其他'].append(answer)


# Extract the tender's basic information.
def extract_basic_info(split_segments_tem):
    """Send the leading document chunks plus the extraction prompt to the LLM;
    the answer lands in ``global_thread['基础信息']`` via dawate_streaming.

    :param split_segments_tem: list of text chunks from the document head
    """
    # Extraction prompt (runtime string, kept verbatim).
    prompt_base_keyinformation = '''
            根据以上信息，抽取出以下关键信息并以json格式返回，只返回抽取到的关键信息,没抽到关键信息就返回,""。
            输出格式要求：

            ```json{
            "招标人/代理人基本信息":{
            "招标人","xxxx",
            "招标代理机构":"xxxx",
            "招标人（或招标代理机构）主要负责人或授权的项目负责人":"xxxx" ，
            "招标人联系方式":"xxxxx"
            "招标人地址":"xxxxx"
            "招标人地址":"xxxxx"},
            "项目信息":{
                "招标项目名称":"xxxx",
                "采购编号/项目编号":"xxx",
                "招标日期":"xxxx",
                "项目概述":"xxxxx",
                "招标项目所在地区":"xxxxxx",
                "资格审查方式":"xxxxxx",
                "招标分类":"xxxxx",
                "项目联系人":"xxxx"
                "项目联系方式":"xxxxxx",
                "项目联系地址":"xxxxxx",
                "是否允许分包":"是或否"
                },
            "关键时间节点":{
                "投标文件截止时间":"xxxx",
                "投标方式":"xxxx",
                "开标时间":"xxxx",
                "开标地点":"xxxx",
                "投标有效期":"xxxx",
                "信息公告媒介":"xxxxx"
                    },
            "保证金相关":{
                "履约保证金":"提交/不提交"，
                "是否有质量保证金":"是/否",
                "退还投标保证金","是/否",
                "投标保证金":"提交/不提交"
            }
            }```
            '''

    # One request carrying all chunks plus the prompt as the message list.
    basic_info_threadpool = []

    print(f'基础信息抽取中，共{len(split_segments_tem)}个======================')

    basic_info_threadpool.append(threading.Thread(
        target=dawate_streaming,
        args=(split_segments_tem + [prompt_base_keyinformation], {"type": '基础信息'})))

    # Start all threads, then block until every answer has arrived.
    for th in basic_info_threadpool:
        th.start()
    for th in basic_info_threadpool:
        th.join()  # idiomatic instance call (was threading.Thread.join(th))

    print(f'基础信息抽取完成======================')


# Extract additional tender information (bid document requirements etc.).
def extract_other_info(split_segments_tem):
    """Fan out one LLM request per chunk to extract the bid-document
    requirements, then issue a final merge/clean-up request.

    :param split_segments_tem: list of document text chunks
    """
    # Extraction prompt (runtime string, kept verbatim).
    Tender_document_requirements_prompt = '''
        抽取以下关键信息并以json的格式输出，输出的json一定要是字典，抽取的关键信息按照原文输出，不允许归纳总结，有键名的需严格按照键名抽取，没抽到关键信息就返回空""
        无效标与废标项中如果该项只针对个别的标的或标包，需要在后面用括号标注
        其中开评定标流程的内容只需参考对应章节及其内容即可，但是需要将该章节下所有内容提取出来，不允许遗漏删减
        ```json{"投标文件要求": {"投标文件要求":"xxxxx",
             "投标文件的组成":"xxxxx",
             "投标报价":"xxxxx",
             "投标有效期":"xxxxx",
             "投标保证金 ":"xxxxx",
             "资格审查资料 ":"xxxxx",
             "备选投标方案 ":"xxxxx",
                     }，
        "开评定标流程": {"开标":{"开标时间和地点":"xxxx","开标程序":"xxxx", "开标异议":"xxxx"},
                     "评标":{"评标委员会":"xxxx","评标原则":"xxxx", "评标":"xxxx"},
                     "合同授予内容":{合同授予章节下的全部内容}},
          "无效标与废标项": {"情况1":"xxx","情况2":"xxx","情况3":"xxx"}

         }```
        '''

    # One request per chunk.
    basic_info_threadpool = []

    print(f'其他额外信息，共{len(split_segments_tem)}个======================')

    for segment in split_segments_tem:
        query = f'{segment}{Tender_document_requirements_prompt}'
        basic_info_threadpool.append(threading.Thread(target=dawate_streaming, args=(query, {"type": '其他额外信息'})))

    # Start all threads, then block until every answer has arrived.
    for th in basic_info_threadpool:
        th.start()
    for th in basic_info_threadpool:
        th.join()  # idiomatic instance call (was threading.Thread.join(th))

    # Merge the per-chunk answers into one text.
    # NOTE(review): pop() removes the bucket from global_thread, so a later
    # '其他额外信息' request would raise KeyError — confirm this is intended.
    other_info = global_thread.pop('其他额外信息')
    other_info = '\n'.join(other_info)

    # Ask the LLM to clean the merged data. Type '其他额外信息合并' has no
    # dedicated branch in dawate_streaming, so the answer lands in '其他'.
    query = f"数据为：{other_info}，\n对合并数据进行数据清洗，删除冗余数据及其错误数据，如果只有一个数值就不要用list直接用字符串，不要改变数据格式"
    th = threading.Thread(target=dawate_streaming, args=(query, {"type": '其他额外信息合并'}))
    th.start()
    th.join()

    print(f'其他额外信息合并完成======================')


# Fetch the detailed review criteria ("详细评审标准") via the LLM.
def get_review(split_result, keys):
    """Extract the detailed review criteria tables through the LLM and
    reshape the answers into a dict keyed by applicable bid package.

    Side effects: rewrites ``global_thread['详细评审标准']`` and dumps
    intermediate snapshots to JSON files in the working directory.

    :param split_result: list of table segments (each a list of rows)
    :param keys: table title, embedded in each prompt for context
    """
    # Extraction prompt appended after each table segment (runtime string).
    prompt_item_Detailedscoring = '''
        抽取以下关键信息并以json的格式输出，输出的json一定要是字典，抽取的关键信息按照原文输出，当抽取到适用标包时，你需要只根据适用标包生成适用标包列表，适用标包不为空时，适用标包列表也不能为空，没抽到关键信息就返回空""
        ```json
        {"详细评审标准 ":{
            "适用标包":"标 1、2019-2020 年软硬件平台维护[包 1:信息中心安全设备及安全系统维护服务,包 2:信息中心台区线损在线分析预测等系统软件基础维护服务,标包 3:信息中心云电智云平台软件高级维护服务]",
            "适用标包列表": ["标的1-标包1","标的1-标包2","标的1-标包3"]
            "商务评审（100）分":[
            {"序号":"xxx","评分因素":"xxxxx","详细评审项":"xxxxx","详细评审分项要素":"xxxxx","分值":"xxxxx"},
             {"序号":"xxx","评分因素":"xxxxx","详细评审项":"xxxxx","详细评审分项要素":"xxxxx","分值":"xxxxx"}
            ]
            "技术评审（100）分":[
            {"序号":"xxx","评分因素":"xxxxx","详细评审项":"xxxxx","详细评审分项要素":"xxxxx","分值":"xxxxx"},
             {"序号":"xxx","评分因素":"xxxxx","详细评审项":"xxxxx","详细评审分项要素":"xxxxx","分值":"xxxxx"}
            ]
             "价格评审（100）分":[
                    {"价格分计算方法名称":"xxx","评标基准价计算方法":"xxx","价格分计算公式":"xxxxx","进入价格评分厂家数量":"xxxxx","下浮率":"xxxxx","RL":"xxxxx",
                    "RH":"xxx","BP":"xxxxx","HP":"xxxxx","LP":"L","若排名前BP%的供应商数量小于等于L值，是否去掉最高最低":"xxxxx"}
                    ]
            }
        }```
        '''

    # One LLM request per table segment.
    review_threadpool = []

    print(f'详细评审标准抽取中，共{len(split_result)}个======================')

    # Build the worker threads; dawate_streaming routes each answer into
    # global_thread['详细评审标准'].
    for split_i in split_result:
        query = f'表名:{keys}\n表内容：\n{split_i}\n{prompt_item_Detailedscoring}'
        review_threadpool.append(
            threading.Thread(target=dawate_streaming, args=(query, {"type": '详细评审标准'})))

    # Start all threads, then block until every answer has arrived.
    for th in review_threadpool:
        th.start()
    for th in review_threadpool:
        threading.Thread.join(th)

    # Snapshot the raw answers to disk.
    with open('详细评审标准.json', 'w') as f:
        json.dump(global_thread, f, ensure_ascii=False)

    print(f'详细评审标准抽取完成======================')

    # Re-key the parsed answers by their '适用标包' (applicable package) field.
    # NOTE(review): assumes every parsed value is a dict containing '适用标包';
    # a malformed LLM answer would raise here — confirm upstream guarantees.
    json_format = {}
    global_thread['详细评审标准'] = [json_repair.loads(i) for i in global_thread['详细评审标准']]
    for i in global_thread['详细评审标准']:
        for key, value in i.items():
            if value['适用标包']:
                json_format[value['适用标包']] = value
    global_thread['详细评审标准'] = json_format

    # Prompt for a (currently disabled) refinement pass, kept for reference:
    # prompt_base_keyinformation = '''
    # 将数据处理成:[标的x-标包x,标的x-标包x],以json的格式输出,分隔逗号要用英文。
    # 样例：
    # 输入：标的4：技术服务2024年第二批采购项目[标包2，标包4，标包5]
    # 输出：```json["标的4-标包2","标的4-标包4","标的4-标包5"]```
    # '''
    #
    # # 整一个线程池
    # review_threadpool = []
    #
    # print(f'详细评审标准完善中，共{len(global_thread["详细评审标准"].keys())}个======================')
    #
    # # 添加适用标包列表
    # for key in global_thread['详细评审标准'].keys():
    #
    #     query = f'数据为：{key}\n{prompt_base_keyinformation}'
    #     review_threadpool.append(threading.Thread(target=dawate_streaming, args=(query, {"type":'详细评审标准完善', 'bid_name':key})))
    #
    # # 启用和阻塞线程
    # for th in review_threadpool:
    #     th.start()
    # for th in review_threadpool:
    #     threading.Thread.join(th)

    print(f'详细评审标准完善完成======================')

    # Snapshot after the (currently disabled) refinement pass.
    with open('详细评审标准完善.json', 'w') as f:
        json.dump(global_thread, f, ensure_ascii=False)

    # Post-processing of the refinement answers, currently disabled:
    # new_dict = {}
    # for key, value in global_thread['详细评审标准'].items():
    #     if value['适用标包'] and value['适用标包'] != '标的xxxx,标的xxxxxx':
    #         answer = value.pop('适用标包列表')
    #         answer = json_process(answer)
    #         if "json" in answer:
    #             answer = json_process(answer)
    #         list_s1=answer.replace("\n",'').replace(" ",'').replace("\"",'')[1:-1].split(',')
    #         for l_i in list_s1:
    #             if l_i in new_dict:
    #                 print(f'error\t{[l_i]}\t{answer}')
    #
    #             new_dict[l_i] = value
    # global_thread['详细评审标准'] = new_dict

    # Final snapshot.
    with open('详细评审标准完善-处理后.json', 'w') as f:
        json.dump(global_thread, f, ensure_ascii=False)


# Locate the commercial/technical/price scoring sections inside a table dump.
def json_transfor(data, result_all):
    """Group the rows of one table under the scoring-section heading
    (commercial / technical / price review) that precedes them.

    :param data: list of rows (lists of cell strings); data[0] is the title row
    :param result_all: dict to extend, keyed by the joined title row
    :return: result_all with result_all[title] = {section_heading: [rows...]}
    """
    section_re = re.compile(r'(（一）、商务评审（100）分|（二）、技术评审（100）分|（三）、价格评审（100）分)')

    sections = {}
    heading = None

    for row in data:
        hit = section_re.search(','.join(row))
        if hit:
            # A section heading opens a new bucket.
            heading = hit.group(1)
            sections[heading] = []
        elif heading:
            # Rows before the first heading are dropped.
            sections[heading].append(row)

    result_all[''.join(data[0])] = sections
    return result_all


# Format the detailed review tables into a JSON-ready dict.
def json_split(split_result):
    '''
    Run json_transfor over every table in split_result, accumulating the
    sectioned rows into a single dict keyed by table title.
    :return: accumulated dict
    '''
    accumulated = {}
    for table_rows in split_result:
        accumulated = json_transfor(table_rows, accumulated)
    return accumulated


# Split table rows into segments at every marker row.
def split_data(data, string_split):
    """Flatten `data` (a list of tables, each a list of rows) into one row
    stream and split it into segments, starting a new segment whenever a row's
    string form contains `string_split`.

    :param data: list of tables (lists of rows)
    :param string_split: marker substring that opens a new segment
    :return: list of segments (lists of rows)
    """
    segments = []
    current = []

    for table in data:
        for row in table:
            if string_split in str(row):
                # Marker row: close the running segment (if any) and reopen.
                if current:
                    segments.append(current)
                current = [row]
            else:
                current.append(row)

    # Keep the trailing segment, if any.
    if current:
        segments.append(current)

    return segments

# Merge table fragments that a page break split apart.
def concat_table(values, operate='concat'):
    """Merge table fragments into a single table.

    :param values: either a list of rows, or a list of tables (each a list of
        rows) which is flattened first
    :param operate: 'concat' appends continuation rows (first cell empty) onto
        the previous row cell-by-cell; any other value pads empty cells with
        the value from the row above. Both modes mutate rows in place.
    :return: the merged list of rows
    """
    rows = []

    # Flatten one level when given a list of tables (3-D input).
    if values and values[0] and isinstance(values[0][0], list):
        for fragment in values:
            rows += fragment
    else:
        rows = values

    # Fix: empty input previously raised ZeroDivisionError in the average below.
    if not rows:
        return []

    # Drop rows narrower than the average width (typically stray caption rows).
    column_num = sum(len(r) for r in rows) / len(rows)
    rows = [r for r in rows if len(r) >= column_num]

    new_ls = rows[:1]

    for i in range(1, len(rows)):
        value = rows[i]

        if operate == 'concat':
            # Skip duplicate rows.
            if value in new_ls:
                continue

            # An empty first cell marks a continuation of the previous row:
            # concatenate cell-by-cell (mutates the previous row in place).
            if value[0] == '':
                for j, item in enumerate(value):
                    new_ls[-1][j] += item
            else:
                new_ls.append(value)
        else:
            # Padding mode: fill empty cells from the row above (in place),
            # then keep the row unless it duplicates an earlier one.
            for j, item in enumerate(value):
                if item == '':
                    value[j] = new_ls[-1][j]

            if value not in new_ls:
                new_ls.append(value)

    return new_ls

def bid_parse_func(docxpath):
    '''
    Parse a tender .docx end-to-end: split it into paragraph/table segments,
    fan out one LLM extraction request per table of interest, then JSON-ify
    and return the collected answers.

    Side effects: writes snapshot JSON files to the working directory and
    resets the module-level ``global_thread`` before returning.

    :param docxpath: path to the tender document
    :return: dict of extracted information (deep copy of global_thread)
    '''

    global global_thread

    # Split the document into text segments plus per-title table rows.
    textsegments, table_dict = split_text_into_segments(docxpath)
    l = [len(i) for i in textsegments]
    print(f'最大长度为{max(l)}')

    # Basic information only needs the leading part of the document
    # (everything before the bid-opening record table).
    split_segments_tem_part = []
    for i in textsegments:
        if '开标记录表' in i:
            break
        split_segments_tem_part.append(i)
        # print(len(i),[i])

    # print(len(s))
    split_segments_tem_part = split_list_into_chunks(split_segments_tem_part, chunk_size=10000)

    # split_segments_tem = split_list_into_chunks(textsegments, chunk_size=10000)

    # Worker threads for the table-extraction requests below.
    bid_threadpool = []

    # Extract basic information (currently disabled).
    # bid_threadpool.append(threading.Thread(target=extract_basic_info, args=(split_segments_tem_part,)))

    # Extract additional information (currently disabled).
    # bid_threadpool.append(threading.Thread(target=extract_other_info, args=(split_segments_tem,)))

    # step2: handle specific tables separately (subject list & packaging,
    # review criteria, ...). The prompts below are runtime strings — verbatim.
    prompt_item_list = '''
     ```json
     {
     "标的清单及分包情况/招标范围":[
        {
        "标的名称":"标的名称xx",
        "概算金额（万元）":"xxxx",
        "包括的标包为":[{"标包名称":"标包名称"，"标包金额":"xxx","最高限价（万元）":"xxx","服务期/工期":"xxxxx"   },
                   {"标包名称":"标包名称"，"标包金额":"xxx","最高限价（万元）":"xxx","服务期/工期":"xxxxx"   },
                    ]
        },

        {
        "标的名称":"标的名称xx",
        "概算金额（万元）":"xxxx",
        "包括的标包为":[{"标包名称":"标包名称"，"标包金额":"xxx","最高限价（万元）":"xxx","服务期/工期":"xxxxx"   },
                   {"标包1":"标包名称"，"标包金额":"xxx","最高限价（万元）":"xxx","服务期/工期":"xxxxx"   },
                    ]
        }
        ]
    }```
    '''
    prompt_item_Prelimscoring = '''
    ```json{
    "初步评审标准":[{"序号":"xxx","初审要素":"xxxxx","评审标准":"xxxxx"},
               {"序号":"xxx","初审要素":"xxxxx","评审标准":"xxxxx"},
                ]
    }```
    '''

    prompt_geneRequ = '''
    ```json{
    "通用资格要求":{

    "要求1":"xxxxx",

    "要求2":"xxxxx",

            }
    }```
    '''
    prompt_speRequ = '''
    当抽取到关联标的/标包/标段时，你需要只根据关联标的/标包/标段生成适用标包列表，关联标的/标包/标段不为空时，适用标包列表也不能为空
    ```json{
       "专用资格要求":
    [{"要求":"要求内容xxxxx","关联标的/标包/标段":"xxxxx","适用标包列表":["标的1","标的1-标包1"]},
    {"要求":"要求内容xxxxx","关联标的/标包/标段":"xxxxx","适用标包列表":["标的1","标的1-标包1"]}]
    }```
       '''

    prompt_score_composition = '''
            ```json{"分值构成":
             [{"序号":"1","标的/标包","xxx","商务分权重":"xxxxx","技术分权重":"xxxxx","价格分权重":"xxxxx","价格分计算方法":"xxxxx"},
            {"序号":"2","标的/标包","xxx","商务分权重":"xxxxx","技术分权重":"xxxxx","价格分权重":"xxxxx","价格分计算方法":"xxxxx"}]
            }```'''

    prompt_recommend_prin = '''
                 ```json{
                 "中标推荐原则":
                [{"序号":"1","标的/标包","xxx","中标推荐原则":"xxxxx"},
                {"序号":"2","标的/标包","xxx","中标推荐原则":"xxxxx"}]
                }```
                '''

    prompt_0 = '抽取以下关键信息并以json的格式输出，输出的json一定要是字典，抽取的关键信息按照原文输出，没抽到关键信息就返回空""'

    Instructions_to_bidders = '''```json{投标人须知前附表":[{"条款号":"xxx","条款名称":"xxx","编列内容":"xxx"},
                                                    {"条款号":"xxx","条款名称":"xxx","编列内容":"xxx"} ]   
                                                                }```'''

    # Dispatch each collected table to its matching extraction prompt.
    # Answers carry no "type", so dawate_streaming files them under '其他'.
    for keys, values in table_dict.items():
        # print([keys])
        if "标的清单及分包情况" in keys:
            print('标的清单及分包情况===============================================')
            values = concat_table(values, 'padding')
            for i in values:
                print(i)
            # Extract the subject list & packaging table.
            query = f"{values}\n{prompt_0}\n{prompt_item_list}"
            bid_threadpool.append(threading.Thread(target=dawate_streaming, args=(query,)))
    #
        # elif "详细评审标准" in keys:
        #     print('详细评审标准===============================================')
        #     split_result = split_data(values, '适用标包')
        #     for new_values in split_result:
        #         new_values = concat_table(new_values)
        #         for i in new_values:
        #             print([i])
        #     # 提取详细评审标准
        #     bid_threadpool.append(threading.Thread(target=get_review, args=(split_result, keys)))
        #
        #     # 原文pdf表格提取
        #     global_thread['详细评审标准-表格展示'] = json_split(split_result)

        elif "初步评审标准" in keys:
            print('初步评审标准===============================================')
            values = concat_table(values)
            for i in values:
                print(i)
            # Extract the preliminary review criteria.
            query = f"{values}\n{prompt_0}\n{prompt_item_Prelimscoring}"
            bid_threadpool.append(threading.Thread(target=dawate_streaming, args=(query,)))
    #
        elif "通用资格要求" in str(values):
            print('通用资格要求===============================================')
            values = concat_table(values)
            for i in values:
                print(i)
            # Extract the general qualification requirements.
            query = f"{values}\n{prompt_0}\n{prompt_geneRequ}"
            bid_threadpool.append(threading.Thread(target=dawate_streaming, args=(query,)))
        #
        # # elif "专用资格要求" in keys:
        elif "专用资格要求" in str(values):
            print('专用资格要求===============================================')
            values = concat_table(values)
            for i in values:
                print(i)
            # Extract the special qualification requirements.
            query = f"{values}\n{prompt_0}\n{prompt_speRequ}"
            bid_threadpool.append(threading.Thread(target=dawate_streaming, args=(query,)))
    #
        # NOTE(review): this `if` (not `elif`) opens a SECOND chain, so a table
        # matching branches in both chains triggers two requests — confirm intended.
        if "中标推荐原则" in keys:
            print('中标推荐原则===============================================')
            values = concat_table(values)
            for i in values:
                print(i)
            # Extract the award recommendation principles.
            query = f"{values}\n{prompt_0}\n{prompt_recommend_prin}"
            bid_threadpool.append(threading.Thread(target=dawate_streaming, args=(query,)))

        elif "分值构成" in keys:
            print('分值构成===============================================')
            values = concat_table(values)
            for i in values:
                print(i)
            # Extract the score composition.
            query = f"{values}\n{prompt_0}\n{prompt_score_composition}"
            bid_threadpool.append(threading.Thread(target=dawate_streaming, args=(query,)))

        elif "投标人须知前附表" in keys:
            print('投标人须知前附表===============================================')
            values = concat_table(values)
            for i in values:
                print(i)
            # Extract the instructions-to-bidders data sheet.
            query = f"{values}\n{prompt_0}\n{Instructions_to_bidders}"
            bid_threadpool.append(threading.Thread(target=dawate_streaming, args=(query,)))

    # print(bid_threadpool)
    # Start every request thread, then wait for all answers.
    for th in bid_threadpool:
        th.start()
    for th in bid_threadpool:
        threading.Thread.join(th)

    # Snapshot the raw (un-JSON-ified) answers.
    with open('非json化结果.json', 'w') as f:
        json.dump(global_thread, f, ensure_ascii=False)

    # JSON-ify: parse each answer's ```json fence; merge the untyped ('其他')
    # answers, which should each be a dict, into global_thread.
    others = global_thread.pop('其他')
    for key, value in global_thread.items():
        if isinstance(value, str):
            global_thread[key] = json_repair.loads(json_process(value))
    for other in others:
        process_data = json_repair.loads(json_process(other))
        if isinstance(process_data, dict):
            global_thread.update(process_data)
        else:
            print('不是字典的有问题的回答: ', [process_data])

    # Snapshot the JSON-ified result.
    with open('json化结果.json', 'w') as f:
        json.dump(global_thread, f, ensure_ascii=False)

    res = deepcopy(global_thread)
    # Reset the shared accumulator for the next run.
    global_thread = {'其他额外信息': [], '详细评审标准': [], '其他': []}

    return res


if __name__ == '__main__':
    # Smoke test: parse one known document and report the elapsed wall time.
    st = time.time()
    bid_parse_func('/media/977GB/wcj_work/投标文件生成/生成文件/bid_parse/20250418_013816_1_南方电网公司2024年第八批信息化项目招标文件.docx')
    print(time.time() - st)
    # threads = []
    # for i in range(50):  # 假设我们想创建3个线程
    #     threads.append(threading.Thread(target=dawate_streaming, args=('你是谁',{"1":i})))
    #
    # print(threads)
    # # 等待所有线程完成
    # for t in threads:
    #     t.start()  # 启动线程
    #
    # # 等待所有线程完成
    # for t in threads:
    #     t.join()
    #
    # print('====================================')
    # s = sorted(global_thread['其他'],key=lambda x:x[0])
    # for i in s:
    #     print(i)



