import json

import requests
import time
import mysql.connector
import os
import config
from datetime import datetime

# Pull the export directory and MySQL connection settings from the shared
# config module once, at import time.
folder_path, ip, port, user, password, database = (
    config.folder_path,
    config.ip,
    config.port,
    config.user,
    config.password,
    config.database,
)


def get_now_date():
    """Return the current local date formatted as ``YYYY-MM-DD``."""
    return datetime.now().strftime('%Y-%m-%d')


def load_from_database():
    """Export today's rows from ``bid_source_data`` into per-id text files.

    Connects to MySQL with the settings loaded from ``config``, selects
    every ``(id, content)`` row whose ``create_time`` is after today's
    date string, and writes one ``<id>.txt`` file per row into
    ``folder_path`` — each selected column on its own line (id first,
    then content).

    Side effects: creates ``folder_path`` if missing; overwrites any
    existing ``<id>.txt`` files.
    """
    conn = mysql.connector.connect(
        host=ip,
        port=port,
        user=user,
        password=password,
        database=database
    )
    try:
        cursor = conn.cursor()
        date_str = datetime.now().strftime('%Y-%m-%d')

        # Parameterized query instead of string concatenation: lets the
        # driver handle quoting and avoids SQL injection.
        exe_sql = "SELECT id, content FROM bid_source_data where create_time > %s"
        print(exe_sql, date_str)
        cursor.execute(exe_sql, (date_str,))
        results = cursor.fetchall()

        # exist_ok avoids the check-then-create race of the original
        # os.path.exists / os.makedirs pair.
        os.makedirs(folder_path, exist_ok=True)

        # One file per row, named by the row id.
        for row in results:
            file_name = os.path.join(folder_path, f"{row[0]}.txt")
            # Explicit utf-8: content is Chinese text and the platform
            # default encoding may not be able to encode it.
            with open(file_name, "w", encoding="utf-8") as f:
                for column in row:
                    f.write(str(column) + "\n")
        cursor.close()
    finally:
        # Always release the connection, even if the query or file
        # writing raises.
        conn.close()


# 遍历文件夹中的文件名称
# List the ids exported to the folder (one txt file per id).
def read_file(folder_path):
    """Return the base names of all regular files directly in *folder_path*.

    The base name is everything before the first ``.`` (e.g. ``123.txt``
    -> ``'123'``).  Sub-directories are skipped.

    :param folder_path: directory to scan.
    :return: list of base-name strings, in ``os.listdir`` order.
    """
    # Single pass; the original shadowed the builtin ``list`` and
    # filtered the listing twice.
    return [
        entry.split(".")[0]
        for entry in os.listdir(folder_path)
        if os.path.isfile(os.path.join(folder_path, entry))
    ]


# 创建知识库
def creat_num(f_list):
    # API的URL
    url = 'http://81.70.145.220:7861/ai/knowledge_base/create_knowledge_base'
    # 循环的条件，
    for item in f_list:
        # POST请求的请求体
        payload = {
            "knowledge_base_name": item,
            "vector_store_type": "faiss",
            "embed_model": "qwen-api"
        }
        # 发起POST请求，并将payload作为JSON数据
        response = requests.post(url, json=payload,headers={"Connection": "close"})
        # 检查响应状态码
        if response.status_code == 200:
            # 解析响应数据
            data = response.json()
            print(data)
        else:
            print(f'状态码：{response.status_code}')
        # 等待一段时间，比如1秒
        time.sleep(5)


# 执行向量化操作
def embedding_by_llm(list):
    import time

    import requests
    import json
    for item in list:
        # 上传文件的接口URL
        upload_url = 'http://81.70.145.220:7861/ai/knowledge_base/upload_docs'

        # 设置要上传的文件的路径
        file_path = folder_path + "/" + item + ".txt"

        # 设置文件的表单字段名和其他参数
        knowledge_base_name = item
        to_vector_store = True
        override = False
        not_refresh_vs_cache = False
        chunk_size = 1024
        chunk_overlap = 50
        zh_title_enhance = False
        docs = {
            "test.txt": [
                {
                    "page_content": "custom doc",
                    "metadata": {},
                    "type": "Document"
                }
            ]
        }

        # 打开文件
        with open(file_path, 'rb') as file:
            files = {
                "files": (file.name, file),
            }
            data = {
                'knowledge_base_name': knowledge_base_name,
                'to_vector_store': to_vector_store,
                'override': override,
                'not_refresh_vs_cache': not_refresh_vs_cache,
                'chunk_size': chunk_size,
                'chunk_overlap': chunk_overlap,
                'zh_title_enhance': zh_title_enhance,
                'docs': json.dumps(docs),
            }
            start_time = time.time()
            # 发送POST请求，files参数是包含文件数据的字典，data参数是包含其他表单数据的字典
            response = requests.post(upload_url, files=files, data=data,headers={"Connection": "close"})
            time.sleep(1)
            end_time = time.time()
            print("向量化文件ID：" + str(item))
        # 打印响应内容
        print(response.text)
        print(end_time - start_time)


def del_num(f_list):
    """Delete one knowledge base per entry in *f_list* via the remote API.

    Each entry is decoded with ``json.loads`` and the resulting value is
    sent as the JSON request body (NOTE(review): for numeric file stems
    like ``"2509"`` this yields an int — confirm that is what the delete
    endpoint expects).  Prints the JSON response on success, the status
    code otherwise, and sleeps 3 seconds between calls.
    """
    url = 'http://81.70.145.220:7861/ai/knowledge_base/delete_knowledge_base'
    for raw in f_list:
        body = json.loads(raw)
        response = requests.post(url, json=body, headers={"Connection": "close"})
        if response.status_code != 200:
            print(f'状态码：{response.status_code}')
        else:
            print(response.json())
        time.sleep(3)


if __name__ == '__main__':
    # Step 1: export today's rows from MySQL into per-id txt files.
    load_from_database()
    # Remaining pipeline steps, currently disabled:
    # list_f = read_file(folder_path)
    # # print(str(list_f))
    # del_num(list_f)
    # creat_num(list_f)
    # # # Call the embedding API and store results into the matching knowledge base.
    # embedding_by_llm(list_f)
    # print(get_now_date())


