#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Time    : 24-5-24 下午15:37
@Author  : zhangweihao
@File    : tasks.py
'''
import os
import sys
import logging
import time
import secrets
import hashlib
import uuid
import asyncio
sys.path.append('/home/tutu/公司/finance_backend/glbx_backend')
import PyPDF2
from docx import Document
import json
import requests
import string
from configs.config import Configs
from model.db_sql import DBSql
from typing import List
import pandas as pd
import textwrap
from PIL import Image, ImageDraw, ImageFont
from flask import g, session, request
from io import BytesIO
import base64

# def check_file_isnt_handled(file_paths:List[str]):
#     filtered_file_paths = []
#     db = DBSql()
#     for file_path in file_paths:
#         file_name = file_path.split('/')[-1]
#         table_name = 'privacy_data' + file_name.split('-')[-1][:8]
#         sql = f"""
#             SELECT handle_status FROM {table_name} WHERE privacy_data_name LIKE '%{file_name}%' AND ai_status = '无需研判'
#         """
#         if 'ocr' in file_name:
#             file_name = file_name.split('_')[0]
#             sql = f"""
#             SELECT handle_status FROM {table_name} WHERE privacy_data_name LIKE '%{file_name}%' AND ai_status IN ('研判失败','研判成功')
#             """
#             # sql = f"""
#             # SELECT handle_status FROM {table_name} WHERE privacy_data_name LIKE '%{file_name}%' 
#             # AND (marked_path LIKE '%.png%' OR marked_path LIKE '%.jpg%') 
#             # AND marked_path NOT LIKE '%marked%'
#             # """
#         try:
#             with db.query_pd():
#                 df = pd.read_sql_query(sql, con=db.conn)['handle_status']
#                 handle_status_list = ['']
#                 if not df.empty:
#                     handle_status_list = df.tolist()
#             if all(element == '未处理' for element in handle_status_list):
#                 filtered_file_paths.append(file_path)
#         except Exception as e:
#             pass
#     return filtered_file_paths

def check_file_isnt_handled(file_paths: List[str]):
    """Return the subset of *file_paths* whose DB records are still unhandled.

    For each file that exists on disk, the daily table name is derived
    from the date fragment embedded in the file name and queried for its
    ``handle_status``. A path is kept when at least one matching row is
    marked '未处理' (unhandled).

    :param file_paths: candidate file paths.
    :return: list of paths that still need handling.
    """
    filtered_file_paths = []
    db = DBSql()
    for file_path in file_paths:
        # Skip paths that no longer exist on disk.
        if not os.path.exists(file_path):
            continue
        file_name = file_path.split('/')[-1]
        # Drop any suffix after the first underscore (e.g. '_1_ocr').
        file_name = file_name.split('_')[0]
        # Tables are partitioned by date: 'privacy_data' + YYYYMMDD taken
        # from the trailing timestamp segment of the file name.
        table_name = 'privacy_data' + file_name.split('-')[-1][:8]
        # NOTE(review): the table name and LIKE pattern are interpolated
        # directly into the SQL. File names appear to be produced
        # internally, but parameterizing the LIKE value would be safer.
        sql = f"""
            SELECT handle_status FROM {table_name} WHERE privacy_data_name LIKE '%{file_name}%'
        """
        try:
            with db.query_pd():
                df = pd.read_sql_query(sql, con=db.conn)['handle_status']
                handle_status_list = ['']
                if not df.empty:
                    handle_status_list = df.tolist()
            if any(element == '未处理' for element in handle_status_list):
                filtered_file_paths.append(file_path)
        except Exception as e:
            # Best-effort: a failed lookup simply excludes the file, but
            # log the error instead of silently swallowing it.
            logging.warning('check_file_isnt_handled failed for %s: %s', file_path, e)
    return filtered_file_paths

def search_keyword_in_files(folder_path, keyword):
    """Recursively search *folder_path* for files containing *keyword*.

    Supports ``.txt``, ``.docx`` and ``.pdf`` files; other extensions are
    skipped. Unreadable files are reported and ignored.

    :param folder_path: directory to walk.
    :param keyword: substring to look for.
    :return: de-duplicated list of matching file paths.
    """
    file_path_list = []
    # Walk every file under the folder.
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            file_path = os.path.join(root, file)
            file_extension = os.path.splitext(file_path)[1].lower()

            try:
                # Plain text: explicit encoding, ignore undecodable bytes
                # rather than failing the whole file.
                if file_extension == '.txt':
                    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        if keyword in f.read():
                            if file_path not in file_path_list:
                                file_path_list.append(file_path)

                # Word documents: scan paragraph by paragraph.
                elif file_extension == '.docx':
                    doc = Document(file_path)
                    for paragraph in doc.paragraphs:
                        if keyword in paragraph.text:
                            if file_path not in file_path_list:
                                file_path_list.append(file_path)
                            break  # one hit is enough for this file

                # PDFs: scan page by page.
                elif file_extension == '.pdf':
                    with open(file_path, 'rb') as f:
                        reader = PyPDF2.PdfReader(f)
                        for page in reader.pages:
                            # extract_text() may return None (e.g. on
                            # image-only pages); guard before 'in'.
                            text = page.extract_text() or ''
                            if keyword in text:
                                if file_path not in file_path_list:
                                    file_path_list.append(file_path)
                                break
            except Exception as e:
                print(f"Error processing {file_path}: {e}")
    return file_path_list

# file_path = "/home/tutu/公司/finance_backend/glbx_backend/static/marked_files/20240528/06/HTTP-FGQqzTJly7FRQsCja-CiR6EACZfOae4ktFj-20240528061630_1_ocr.txt"

def get_keyword_in_files_by_searcher(keyword):
    """Ask the searcher service for paths of files containing *keyword*.

    :param keyword: search term forwarded to the service.
    :return: list of matching file paths; empty list on any failure.
    """
    data = {
    "keyword": keyword
    }

    try:
        # Bounded timeout so a hung searcher service cannot block callers
        # forever; network errors degrade to an empty result.
        response = requests.post(
            f"http://{Configs().BASE_URL}:{Configs().SEARCHER_PORT}/get_path",
            json=data,
            timeout=30,
        )
    except requests.RequestException as e:
        logging.warning('searcher request failed: %s', e)
        return []
    if response.status_code == 200:
        return response.json().get('data', {}).get('path_list', [])
    return []


def upload_article(file_path, dataset_id, api_key):
    """Upload one file into a Dify dataset via ``create_by_file``.

    :param file_path: local path of the document to upload.
    :param dataset_id: target dataset id.
    :param api_key: dataset-scoped API key.
    :return: dict with 'document_id' and 'batch' on success, {} otherwise.
    """
    url = Configs().DIFY_URL + f'v1/datasets/{dataset_id}/document/create_by_file'
    headers = {
        'Authorization': f'Bearer {api_key}'
    }

    # Indexing configuration is passed as a raw JSON string, per Dify's API.
    data = {
        'data': '{"indexing_technique":"high_quality","process_rule":{"rules":{"pre_processing_rules":[{'
                '"id":"remove_extra_spaces","enabled":true},{"id":"remove_urls_emails","enabled":true}],'
                '"segmentation":{"separator":"###","max_tokens":500}},"mode":"custom"}}'
    }

    # Context manager closes the handle deterministically (was leaked before).
    with open(file_path, 'rb') as upload_file:
        response = requests.post(url, headers=headers, data=data,
                                 files={'file': upload_file})
    if response.status_code == 200:
        payload = response.json()
        return {
            'document_id': payload.get('document', {}).get('id'),
            'batch': payload.get('batch'),
        }
    return {}

def del_artical_by_id(document_id, dataset_id, api_key):
    """Delete a document from a Dify dataset.

    :return: True when the API reports 'success', False otherwise.
    """
    url = Configs().DIFY_URL + f'v1/datasets/{dataset_id}/documents/{document_id}'
    auth_headers = {'Authorization': 'Bearer ' + f'{api_key}'}

    response = requests.delete(url, headers=auth_headers)
    if response.status_code != 200:
        return False
    return json.loads(response.text).get('result') == 'success'

def change_dict(item):
    """Recursively convert nested dicts/lists into an antd-style tree.

    Each dict entry becomes a node with a random ``key``; nested dicts and
    heterogeneous lists go under ``children``, while strings, lists of
    strings and other scalars become ``desc`` leaf values.

    :param item: arbitrarily nested dict / list / scalar structure.
    :return: list of tree nodes for dicts/lists; the value itself for leaves.
    """
    result = []
    if isinstance(item, dict):
        for k, v in item.items():
            node = {"title": k, "key": str(uuid.uuid4())}
            if isinstance(v, dict):
                node["children"] = change_dict(v)
            elif isinstance(v, list):
                # A list made purely of strings is displayed as a leaf
                # description; mixed lists keep their tree structure.
                if all(isinstance(element, str) for element in v):
                    node["desc"] = change_dict(v)
                else:
                    node["children"] = change_dict(v)
            else:
                node["desc"] = change_dict(v)
            result.append(node)
        return result
    elif isinstance(item, list):
        return [change_dict(v) for v in item]
    # Leaf: return the scalar unchanged (str, int, float, bool, None, ...).
    # Previously only str was returned and every other scalar became None.
    return item

# def workflow(api_key, name):
#     font_path1 = "utils/SimHei.ttf"
#     font_path2 = "glbx_backend/utils/SimHei.ttf"
#     font = font_path1 if os.path.exists(font_path1) else font_path2
#     url = Configs().DIFY_URL + 'v1/workflows/run'
#     headers = {
#         'Authorization': f'Bearer {api_key}',
#         'Content-Type': 'application/json'
#     }

#     data = {
#         "inputs": {
#             "name": name,
#         },
#         "response_mode": "streaming",
#         "user": "abc-123"
#     }

#     response = requests.post(url, headers=headers, json=data)
#     response_list = response.text.split('\n\n')
#     res = []
#     document_info = []
#     for i in response_list:
#         if '{' in i and '}' in i:
#             data = json.loads(i[5:])
#             if (data.get('data',{}).get('outputs',{}).get('result',{})
#                     and isinstance(data.get('data',{}).get('outputs',{}).get('result',{}), list)):
#                 for j in data.get('data',{}).get('outputs',{}).get('result',{}):
#                     document_name = j.get('metadata').get('document_name')
#                     content = j.get('content')
#                     ducument_dict = {
#                         'document_id': j.get('metadata').get('document_id'),
#                         'document_name': document_name,
#                         'content': content,
#                         'format': document_name.rsplit('.', 1)[-1],
#                         'document_base64_img':convert_str2base64img(content, font)
#                     }
#                     document_info.append(ducument_dict)

#             if (data.get('event') == 'workflow_finished' and data.get('data', {}).get('status') == 'succeeded'
#                     and data.get('data', {}).get('outputs')):
#                 res = change_dict(json.loads(data.get('data').get('outputs').get('text')[7:-3]))
#     if res:
#         res.append({
#             'title': "关联来源",
#             'key': str(uuid.uuid4()),
#             'desc': "/|\\"
#         })
#     else:
#         document_info, res = [], []
#     return document_info, res

def workflow(api_key, name):
    """Run the Dify streaming workflow for a subject called *name*.

    Streams the workflow's server-sent events, collects the retrieved
    source documents, and parses the final structured result.

    :param api_key: workflow-scoped Dify API key.
    :param name: subject name fed into the workflow input.
    :return: tuple ``(document_info, res, res1)`` — document_info is a
        list of source-document dicts (content rendered to base64 PNG
        previews), res is the tree built by change_dict, res1 is the raw
        parsed dict. All three are empty on any failure.
    """
    # The bundled font path differs depending on the working directory.
    font_path1 = "utils/SimHei.ttf"
    font_path2 = "glbx_backend/utils/SimHei.ttf"
    font = font_path1 if os.path.exists(font_path1) else font_path2
    url = Configs().DIFY_URL + 'v1/workflows/run'
    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json'
    }

    data = {
        "inputs": {
            "name": name,
        },
        "response_mode": "streaming",
        "user": "abc-123"
    }

    response = requests.post(url, headers=headers, json=data)
    # Server-sent events are separated by blank lines.
    response_list = response.text.split('\n\n')
    res = []
    document_info = []
    document_data = []
    document_name_set = {}
    res1 = {}
    try:
        for i in response_list:
            if '{' in i and '}' in i:
                # [5:] strips the leading 'data:' SSE prefix before parsing.
                data = json.loads(i[5:])
                if (data.get('data',{}).get('outputs',{}).get('result',{})
                        and isinstance(data.get('data',{}).get('outputs',{}).get('result',{}), list)):
                    # Retrieval event: remember the chunks and the set of
                    # distinct source document names.
                    document_data = data.get('data',{}).get('outputs',{}).get('result',{})
                    document_name_set = {j.get('metadata').get('document_name') for j in document_data}
                elif (data.get('event') == 'workflow_finished' and data.get('data', {}).get('status') == 'succeeded'
                        and data.get('data', {}).get('outputs')):
                    # Final event: output text appears to be wrapped in a
                    # ```json ...``` fence; [7:-3] strips the fence markers
                    # -- TODO confirm against actual workflow output.
                    res1 = json.loads(data.get('data').get('outputs').get('text')[7:-3])
                    res = change_dict(res1)

        # Group retrieved chunks per source document and render each chunk
        # as a base64 PNG preview image.
        for document_name in document_name_set:
            content = [j.get('content') for j in document_data if j.get('metadata').get('document_name') == document_name]
            document_base64_img = [{'title':j[:10],'img':convert_str2base64img(j, font)} for j in content]
            format = document_name.rsplit('.', 1)[-1]
            document_id = [j.get('metadata').get('document_id') for j in document_data 
                        if j.get('metadata').get('document_name') == document_name][0]
            document_dict = {
                            'document_id': document_id,
                            'document_name': document_name,
                            'content': content,
                            'format': format,
                            'document_base64_img':document_base64_img
                        }
            document_info.append(document_dict)

        if res:
            # Sentinel node consumed by the frontend to mark where the
            # related sources should be rendered.
            res.append({
                'title': "关联来源",
                'key': str(uuid.uuid4()),
                'desc': "/|\\"
            })

        else:
            document_info, res, res1 = [], [], {}
    except Exception as e:
        # Any parsing/rendering failure degrades to empty results.
        logging.error(e)
        document_info, res, res1 = [], [], {}
    return document_info, res, res1

def workflow_blocking(api_key, json_, mode):
    """Run a Dify workflow synchronously and return the parsed JSON reply.

    :param api_key: workflow-scoped API key.
    :param json_: value bound to the workflow's 'json' input.
    :param mode: value bound to the workflow's 'mode' input.
    """
    endpoint = Configs().DIFY_URL + 'v1/workflows/run'
    request_headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json'
    }
    payload = {
        "inputs": {
            "json": json_,
            "mode": mode,
        },
        "response_mode": "blocking",
        "user": "abc-123"
    }
    return requests.post(endpoint, headers=request_headers, json=payload).json()

def upload_all_articals(atrticals: List[str], dataset_id, api_key):
    """Upload every not-yet-uploaded file and wait until indexing finishes.

    :param atrticals: local file paths to upload.
    :param dataset_id: target Dify dataset id.
    :param api_key: dataset-scoped API key.
    """
    batch_list = []
    for artical_path in atrticals:
        document_name = artical_path.split('/')[-1]
        if not if_articals_exist(document_name, dataset_id, api_key):
            res = upload_article(file_path=artical_path, dataset_id=dataset_id, api_key=api_key)
            if res:
                batch_list.append(res.get('batch'))
                document_id = res.get('document_id')
                # Store the path relative to the project root (four
                # directories above this file).
                base_directory = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
                document_path = os.path.relpath(artical_path, base_directory)
                insert_document_info(document_id, document_path, document_name)
    # Poll until every batch reaches a terminal state; a finished batch is
    # replaced by 0 so any() turns False once all are done.
    while True:
        for i, batch in enumerate(batch_list):
            if not batch:
                continue  # already terminal
            status = check_file_status(batch, dataset_id, api_key)
            # Treat 'error' and a missing status as terminal as well:
            # previously a failed batch kept this loop spinning forever.
            if status == "completed" or status == "error" or status is None:
                batch_list[i] = 0
        if not any(batch_list):
            break
        # Avoid hammering the API in a tight busy-wait loop.
        time.sleep(1)



def insert_document_info(document_id, document_path, document_name):
    """Persist the mapping from a Dify document id to its local file.

    :param document_id: id returned by the Dify upload API.
    :param document_path: file path relative to the project root.
    :param document_name: bare file name.
    """
    db = DBSql()
    # Parameterized INSERT -- values are bound by the DB driver.
    sql = """
        INSERT INTO risk_retrieval_file2id (document_id, document_path, document_name) VALUES (%s, %s, %s)
    """
    with db.insert(sql, (document_id,document_path,document_name)):
        pass

def if_articals_exist(document_name, dataset_id, api_key):
    """Count dataset documents whose name matches *document_name*.

    :return: Dify's 'total' field (truthy when the document exists);
        None when the request fails.
    """
    endpoint = Configs().DIFY_URL + f'v1/datasets/{dataset_id}/documents'
    auth_headers = {'Authorization': 'Bearer ' + f'{api_key}'}
    query = {'keyword': document_name}

    response = requests.get(endpoint, headers=auth_headers, params=query)
    if response.status_code != 200:
        return None
    return response.json().get('total')

def check_file_status(batch, dataset_id, api_key):
    """Return the indexing status of an upload batch, or None if unavailable."""
    endpoint = Configs().DIFY_URL + f'v1/datasets/{dataset_id}/documents/{batch}/indexing-status'
    auth_headers = {'Authorization': f'Bearer {api_key}'}
    response = requests.get(endpoint, headers=auth_headers)
    payload = json.loads(response.text).get('data')
    return payload[0].get("indexing_status") if payload else None

def convert_str2base64img(text, font, cutline=True):
    """Render *text* onto a white PNG image and return it base64-encoded.

    :param text: multi-line text to draw.
    :param font: path to a TTF font file.
    :param cutline: unused; kept for backward compatibility.
    :return: base64-encoded PNG bytes as a str.
    """
    # Load the font at a fixed 20pt size.
    font = ImageFont.truetype(font, 20)

    # Guard against empty input: Image.new() rejects zero dimensions.
    lines = text.splitlines() or ['']

    # NOTE(review): ImageFont.font.getsize was removed in Pillow 10; if
    # Pillow is upgraded, switch to font.getbbox()/getlength().
    line_height = font.font.getsize("hg")[0][1]  # line height in pixels
    line_spacing = 10  # vertical gap between lines
    image_height = (line_height + line_spacing) * len(lines)

    # Width of the widest line; at least 1px so Image.new() never gets 0.
    max_width = max(font.font.getsize(line)[0][0] for line in lines) or 1
    image = Image.new("RGB", (max_width, image_height), "white")
    draw = ImageDraw.Draw(image)

    # Draw each line top-to-bottom.
    y = 0
    for line in lines:
        draw.text((0, y), line, fill="black", font=font)
        y += line_height + line_spacing

    # Serialize to PNG in memory and base64-encode.
    image_stream = BytesIO()
    image.save(image_stream, format="PNG")
    return base64.b64encode(image_stream.getvalue()).decode()

def delete_knowledge_base(name, token):
    """Delete the knowledge base called *name* via the console API.

    :return: '删除成功' on HTTP 204, '删除失败' otherwise.
    """
    dataset_id = get_knowledge_base_id(name, token)
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    endpoint = Configs().DIFY_URL + f'console/api/datasets/{dataset_id}'
    response = requests.delete(url=endpoint, headers=request_headers)
    return '删除成功' if response.status_code == 204 else '删除失败'

# Deprecated: this public-API endpoint cannot configure embedding/rerank models.
def create_knowledge_base(name, api_key):
    """Create a dataset via the public API; return its id on success."""
    endpoint = Configs().DIFY_URL + f'v1/datasets'
    request_headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json'
    }

    response = requests.post(endpoint, headers=request_headers, json={'name': name})
    if response.status_code != 200:
        return None
    return json.loads(response.text).get('id')

def login_dify(email, passwd):
    """Log into the Dify console.

    :return: ``(True, console_token_data)`` on success, ``(False, '')`` otherwise.
    """
    payload = {
        'email': email,
        'password': passwd,
        'remember_me': 'true'
    }
    response = requests.post(url=Configs().DIFY_URL + f'console/api/login', json=payload)
    if response.status_code != 200:
        return False, ''
    body = response.json()
    if body.get('result') == 'success':
        return True, body.get('data')
    return False, ''

def get_knowledge_base_id(name, token):
    """Return the id of the dataset whose name equals *name* exactly, else ''."""
    endpoint = Configs().DIFY_URL + f'console/api/datasets?page=1&limit=30&keyword={name}'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }

    response = requests.get(endpoint, headers=request_headers)
    if response.status_code == 200:
        # The keyword search may return partial matches; require equality.
        for entry in response.json().get('data') or []:
            if entry.get('name') == name:
                return entry.get('id')
    return ''

# Dataset creation cannot set model parameters directly; a placeholder file
# is uploaded to configure the knowledge base, then deleted afterwards.
def upload_init_file_by_token(token):
    """Upload the bundled init.txt placeholder file to Dify.

    :param token: console API token.
    :return: the upload's file id on HTTP 201, None otherwise.
    """
    headers = {
        'Authorization': f'Bearer {token}',
    }
    current_dir = os.path.dirname(os.path.realpath(__file__))
    url = Configs().DIFY_URL + f'console/api/files/upload?source=datasets'
    # Close the file handle deterministically (it was previously leaked).
    with open(current_dir + '/init.txt', 'rb') as init_file:
        response = requests.post(url, headers=headers, files={'file': init_file})
    if response.status_code == 201:
        return response.json().get('id')

def create_knowledge_base_by_token(name, token):
    """Create (or reuse) a fully configured knowledge base named *name*.

    Creates the dataset through the console 'init' endpoint seeded with a
    placeholder file, waits for indexing, then renames the dataset and
    deletes the placeholder.

    :param name: knowledge base name.
    :param token: console API token.
    :return: the dataset id.
    :raises ValueError: when the indexing-status check fails over HTTP.
    """
    # Reuse an existing dataset with the same name if one exists.
    exsit_knowledge_base_id = get_knowledge_base_id(name, token)
    if exsit_knowledge_base_id:
        return exsit_knowledge_base_id
    url = Configs().DIFY_URL + f'console/api/datasets/init'
    headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    # Dataset configuration: hybrid search with xinference reranker;
    # seeded with the placeholder init file so models can be configured.
    data = {"data_source":{
            "type":"upload_file",
            "info_list":{
                "data_source_type":"upload_file",
                "file_info_list":{
                    "file_ids":[upload_init_file_by_token(token)]
                    }
                }
            },
            "indexing_technique":"high_quality",
            "process_rule":{
                "rules":{},
                "mode":"automatic"
                },
            "doc_form":"text_model",
            "doc_language":"Chinese",
            "retrieval_model":{
                "search_method":"hybrid_search",
                "reranking_enable":'true',
                "reranking_model":{
                    "reranking_provider_name":"xinference",
                    "reranking_model_name":"bge-reranker-v2-m3"
                    },
                "top_k":10,
                "score_threshold_enabled":'true',
                "score_threshold":0.5
                }
            }
    
    response = requests.post(url=url, headers=headers, json=data)
    dataset_id = response.json().get('dataset',{}).get('id')
    batch = response.json().get('batch')
    # Poll once per second until the placeholder finishes indexing.
    # NOTE(review): there is no upper bound; an indexing job stuck in a
    # non-'completed' state would loop forever -- consider a timeout.
    while True:
        status = check_indexing_status(dataset_id, batch, token)
        logging.warning(status)
        if status == 'completed':
            break
        if status == False:
            raise ValueError('网络访问失败')
        time.sleep(1)
    # Apply the real name/description and remove the placeholder file.
    change_knowledge_base_name_and_des(dataset_id, name, token)
    file_id = get_init_file_id(dataset_id, token)
    delete_init_file(dataset_id, file_id, token)
    return dataset_id
        
def check_indexing_status(dataset_id, batch, token):
    """Return the indexing status string for *batch*, or False on failure."""
    endpoint = Configs().DIFY_URL + f'console/api/datasets/{dataset_id}/batch/{batch}/indexing-status'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }

    response = requests.get(endpoint, headers=request_headers)
    if response.status_code != 200:
        return False
    payload = response.json().get('data', {})
    if not payload:
        return False
    return payload[0].get('indexing_status')

def change_knowledge_base_name_and_des(dataset_id, name, token):
    """Rename a dataset and (re)apply its retrieval/embedding configuration.

    :return: '修改成功' on HTTP 200, '修改失败' otherwise.
    """
    endpoint = Configs().DIFY_URL + f'console/api/datasets/{dataset_id}'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    # Hybrid search with xinference bge reranker/embedder, private dataset.
    payload = {
        "name": name,
        "description": f"这是关于{name}的知识库",
        "permission": "only_me",
        "indexing_technique": "high_quality",
        "retrieval_model": {
            "search_method": "hybrid_search",
            "reranking_enable": 'true',
            "reranking_model": {
                "reranking_provider_name": "xinference",
                "reranking_model_name": "bge-reranker-v2-m3"
            },
            "top_k": 10,
            "score_threshold_enabled": 'true',
            "score_threshold": 0.5
        },
        "embedding_model": "bge-m3",
        "embedding_model_provider": "xinference"
    }
    response = requests.patch(url=endpoint, headers=request_headers, json=payload)
    return '修改成功' if response.status_code == 200 else '修改失败'

def get_init_file_id(dataset_id, token):
    """Find the document id of the placeholder init.txt inside a dataset."""
    endpoint = Configs().DIFY_URL + f'console/api/datasets/{dataset_id}/documents?page=1&limit=15&keyword=init.txt&fetch='
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.get(url=endpoint, headers=request_headers)
    if response.status_code != 200:
        return None
    documents = response.json().get('data')
    return documents[0].get('id') if documents else None
            
def delete_init_file(dataset_id, file_id, token):
    """Delete the placeholder init document from a dataset.

    :return: '删除成功' on HTTP 204, '删除失败' otherwise.
    """
    endpoint = Configs().DIFY_URL + f'console/api/datasets/{dataset_id}/documents/{file_id}'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.delete(url=endpoint, headers=request_headers)
    return '删除成功' if response.status_code == 204 else '删除失败'

def get_workflow_id_by_name(name, token):
    """Return the id of the Dify app whose name equals *name* exactly, else ''."""
    endpoint = Configs().DIFY_URL + f'console/api/apps?page=1&limit=30&name={name}'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.get(url=endpoint, headers=request_headers)
    if response.status_code == 200:
        apps = response.json().get('data', [])
        # Require an exact name match among keyword search results.
        return next((app.get('id') for app in apps if app.get('name') == name), '')
    return ''

def get_workflow_draft_by_id(workflow_id, token):
    """Fetch the draft definition of a workflow app; {} on failure."""
    endpoint = Configs().DIFY_URL + f'console/api/apps/{workflow_id}/workflows/draft'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.get(url=endpoint, headers=request_headers)
    return response.json() if response.status_code == 200 else {}

def get_dataset_id_in_draft(ids, token):
    """Filter *ids* down to the dataset ids that still exist in Dify.

    :param ids: candidate dataset ids (stale ones are dropped server-side).
    :param token: console API token.
    :return: list of existing ids; empty list when the request fails.
        (Previously returned None on failure, which crashed callers that
        immediately call ``.append`` on the result.)
    """
    url = Configs().DIFY_URL + f'console/api/datasets?page=1'
    # Each id is passed as a repeated 'ids' query parameter.
    for id_ in ids:
        url += '&ids=' + id_
    headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.get(url=url, headers=headers)

    if response.status_code == 200:
        return [i.get('id') for i in response.json().get('data', [])]
    return []

def edit_workflow_dataset(workflow_id, graph, features, token, hash_):
    """Overwrite a workflow draft with an updated graph.

    :param hash_: hash of the draft being replaced (optimistic lock).
    :return: True on HTTP 200, False otherwise.
    """
    endpoint = Configs().DIFY_URL + f'console/api/apps/{workflow_id}/workflows/draft'
    payload = {
        "graph": graph,
        "features": features,
        "hash": hash_,
    }
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.post(url=endpoint, headers=request_headers, json=payload)
    return response.status_code == 200

def get_knowledge_base_ids_from_workflow_draft(name, token):
    """Return the dataset_ids configured on the first knowledge-retrieval
    node of the workflow named *name* ([] when absent)."""
    workflow_id = get_workflow_id_by_name(name, token)
    draft = get_workflow_draft_by_id(workflow_id, token)
    if not draft:
        return []
    # The first node carrying 'dataset_ids' is the knowledge node.
    for node in draft.get('graph', {}).get('nodes', []):
        node_dataset_ids = node.get('data', {}).get('dataset_ids')
        if node_dataset_ids:
            return node_dataset_ids
    return []

def add_knowledge_base2workflow_by_workflow_name(database_id, name, token):
    """Attach knowledge base *database_id* to the workflow named *name*.

    Finds the workflow's knowledge-retrieval node, appends the new dataset
    id to its (still-existing) dataset ids, saves the draft, and publishes
    the workflow. No-op when no knowledge node can be found.

    :param database_id: dataset id to attach.
    :param name: workflow app name.
    :param token: console API token.
    """
    workflow_id = get_workflow_id_by_name(name, token)
    draft = get_workflow_draft_by_id(workflow_id, token)
    hash_ = draft.get('hash')
    dataset_id_list = []
    features = {}
    graph = {}
    # Initialize up-front: previously this was only assigned inside the
    # nested ifs, so an empty draft raised NameError below.
    dataset_index = None
    if draft:
        features = draft.get('features', {})
        graph = draft.get('graph', {})
        nodes = graph.get('nodes', [])
        for i, node in enumerate(nodes):
            existing_ids = node.get('data', {}).get('dataset_ids')
            if existing_ids:
                dataset_id_list = existing_ids
                dataset_index = i
                break
    # 'is None' instead of truthiness: node index 0 is a valid position
    # and must not be treated as "not found".
    if dataset_index is None:
        return
    # Guard against a failed lookup returning a non-list.
    filter_id_list = get_dataset_id_in_draft(dataset_id_list, token) or []
    filter_id_list.append(database_id)
    graph['nodes'][dataset_index]['data']['dataset_ids'] = filter_id_list
    edit_workflow_dataset(workflow_id, graph, features, token, hash_)
    # Brief pause so the draft save settles before publishing.
    time.sleep(0.5)
    publish_the_workflow_by_id(workflow_id, token)
    return

def publish_the_workflow_by_id(workflow_id, token):
    """Publish the current draft of a workflow app; True on HTTP 200."""
    endpoint = Configs().DIFY_URL + f'console/api/apps/{workflow_id}/workflows/publish'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    return requests.post(url=endpoint, headers=request_headers).status_code == 200

def search_dataset_id_by_name(name, token):
    """Return the id of the first dataset matching keyword *name*, else ''."""
    endpoint = Configs().DIFY_URL + f'console/api/datasets?page=1&limit=30&keyword={name}'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.get(url=endpoint, headers=request_headers)
    if response.status_code != 200:
        return ''
    matches = response.json().get('data')
    return matches[0].get('id') if matches else ''

def get_document_id(dataset_id, document_name, token):
    """Return the id of the first document matching *document_name*, else ''."""
    endpoint = Configs().DIFY_URL + f'console/api/datasets/{dataset_id}/documents?page=1&limit=15&keyword={document_name}&fetch='
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.get(url=endpoint, headers=request_headers)
    if response.status_code != 200:
        return ''
    documents = response.json().get('data')
    return documents[0].get('id') if documents else ''
            
def delete_document_by_id(dataset_id, document_id, token):
    """Delete one document from a dataset via the console API.

    :return: '删除成功' on HTTP 204, '删除失败' otherwise.
    """
    endpoint = Configs().DIFY_URL + f'console/api/datasets/{dataset_id}/documents/{document_id}'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    response = requests.delete(url=endpoint, headers=request_headers)
    return '删除成功' if response.status_code == 204 else '删除失败'

def delete_document_when_handle_privacy(name, token):
    """Remove the document named *name* from every monitored subject's dataset.

    Reads all monitored names from risk_monitoring_list; each name doubles
    as its dataset's name.
    """
    db = DBSql()
    sql = f"""
        SELECT name FROM risk_monitoring_list
    """
    with db.query_pd():
        monitored_names = set(pd.read_sql_query(sql, con=db.conn)['name'].tolist())
    for dataset_name in monitored_names:
        dataset_id = search_dataset_id_by_name(dataset_name, token)
        matched_document_id = get_document_id(dataset_id, name, token)
        if matched_document_id:
            delete_document_by_id(dataset_id, matched_document_id, token)

# Fetch the workflow's API key; one is created automatically if none exists.
def get_workflow_apikey_by_id(workflow_id, token):
    """Return an API key for *workflow_id*, creating one when missing."""
    endpoint = Configs().DIFY_URL + f'console/api/apps/{workflow_id}/api-keys'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    existing_keys = requests.get(url=endpoint, headers=request_headers).json().get('data')
    if existing_keys:
        return existing_keys[0].get('token')
    return create_workflow_apikey_by_id(workflow_id, token)

# Create a fresh API key for a workflow app.
def create_workflow_apikey_by_id(workflow_id, token):
    """Create and return a new API key for *workflow_id*."""
    endpoint = Configs().DIFY_URL + f'console/api/apps/{workflow_id}/api-keys'
    request_headers = {
        'Authorization': f'Bearer {token}',
        'Content-Type': 'application/json'
    }
    return requests.post(url=endpoint, headers=request_headers).json().get('token')
        

if __name__ == "__main__":
    # Ad-hoc manual test harness; most calls are kept commented out for
    # one-off local experiments against the Dify instance.
    # res = upload_article(file_path=file_path,dataset_id=dataset_id,api_key=api_key)
    # print(res)
    # res = del_artical_by_id('7d37e489-6740-4318-9cb0-70306a2ebc5b',dataset_id,api_key)
    # print(res)
    # res = if_articals_exist('123123',dataset_id,api_key)
    # print(res)
#     convert_str2base64img("""'因家庭原因，6 年前辞去工行副行长等职
# 公开信息显示，张红力 1984 年 7 月毕业于黑龙江八一农垦大学，1989 年 9
# 月获得加拿大阿尔伯特大学遗传学硕士学位，1991 年 7 月获得美国加州圣哥拉大
# 学工商管理硕士学位，曾任德意志银行（中国）有限公司（以下简称“德银中国”）
# 董事长等职。
# 2001 年 3 月，张红力任德意志投资银行亚洲分行大中华区主管、副董事长兼
# 中国区主席，德意志投资银行亚洲区总裁；2004 年 10 月起，张红力任德意志银
# 行投资银行全球执行委员会委员、亚太区总裁，德银中国董事长'""")
    # dataset_id = "843d81b6-4faf-4269-8c8a-940e89b4e3a4"
    # NOTE(review): hardcoded API key and console credentials are checked
    # into source control below -- move them to configuration/environment
    # variables and rotate the exposed values.
    api_key = "dataset-je6PZrvfvwbdd77xOLcGvwvW"
    # upload_all_articals(['/home/tutu/公司/finance_backend/glbx_backend/static/marked_files/20240528/06/HTTP-FGQqzTJly7FRQsCja-CiR6EACZfOae4ktFj-20240528061630.pdf', '/home/tutu/公司/finance_backend/glbx_backend/static/marked_files/20240528/06/HTTP-FGQqzTJly7FRQsCja-CiR6EACZfOae4ktFj-20240528061630_1_ocr.txt', '/home/tutu/公司/finance_backend/glbx_backend/static/marked_files/20240528/06/HTTP-FGQqzTJly7FRQsCja-CiR6EACZfOae4ktFj-20240528061630.docx'],
    #                     dataset_id=dataset_id,api_key=api_key)
    # upload_article('/home/tutu/公司/finance_backend/glbx_backend/static/marked_files/20240528/06/HTTP-FGQqzTJly7FRQsCja-CiR6EACZfOae4ktFj-20240528061630.pdf',
    #                     dataset_id=dataset_id,api_key=api_key)
    # print(check_file_status('20240531082915529173', dataset_id,api_key))
    # print(if_articals_exist('HTTP-FGQqzTJly7FRQsCja-CiR6EACZfOae4ktFj-20240528061630.pdf',dataset_id,api_key))
    email = 'hkxa@admin.com'
    passwd = 'hkxa@123#123'
    # Smoke test: query the searcher service for a sample keyword.
    print(get_keyword_in_files_by_searcher('张红力'))
    # d,r = workflow(Configs().api_key_workflow, '顾国明')
    # print(d)
    # print(r)
    
    # print(get_knowledge_base_id('test', email, passwd))
    # res = create_knowledge_base('张炜昊', api_key)
    # print(res)
    # flag, token = login_dify(email, passwd)
    # if not flag:
    #     print('登录失败')
    #     exit()
    # res = search_dataset_id_by_name('张红力', token)
    # res = delete_document_when_handle_privacy('HTTP-FnUj3o4sZT7GUinb04-CJMeQt2LFwaaQBTVM3-20240604084612', token)
    # print(res)
    # check_indexing_status('0c20e935-68ef-4227-b228-099ad464e029',"20240603054526474418")
    # create_knowledge_base_by_token('张炜昊',token)
    # change_knowledge_base_name_and_des('8f492ba0-e85b-4b78-94fc-16e1cd586645', '张炜昊', token)
    # print(get_init_file_id(dataset_id='a02739a6-c2d9-4cae-8687-0363d4ac4d1e', token=token))
    # print(if_knowledge_base_exist('test', token))
    # delete_knowledge_base('af12f0e9-2a25-4ad6-a710-a6c2db127cb6',token)
    #############
    # print(get_knowledge_base_ids_from_workflow_draft('银行高管敏感信息分析V1.2', token))
    ##############
    # workflow_id = get_workflow_id_by_name('银行高管敏感信息分析V1.2', token)
    # draft = get_workflow_draft_by_id(workflow_id, token)
    # hash_ = draft.get('hash')
    # dataset_id_list = []
    # features = {}
    # graph = {}
    # if draft:
    #     features = draft.get('features', {})
    #     graph = draft.get('graph', {})
    #     data1 = graph.get('nodes', [])
    #     if data1:
    #         dataset_index = None
    #         for i,v in enumerate(data1):
    #             x = v.get('data',{}).get('dataset_ids')
    #             if x:
    #                 dataset_id_list = x
    #                 dataset_index = i
    #                 break
    # filter_id_list = get_dataset_id_in_draft(dataset_id_list, token)
    # filter_id_list.append('843d81b6-4faf-4269-8c8a-940e89b4e3a4')
    # graph['nodes'][dataset_index]['data']['dataset_ids'] = filter_id_list
    # # print(graph)
    # print(edit_workflow_dataset(workflow_id, graph, features, token, hash_))
