import json
import os
import re
import sys
import time
from datetime import datetime

import requests

# Do not generate __pycache__ bytecode files
sys.dont_write_bytecode = True

# Make the project root importable (this file's directory)
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# Project helpers (must come after the sys.path tweak above)
from utils.logger import setup_logging
from utils.database import DatabaseManager
from utils.uuid_utils import generate_uppercase_uuid
# Project configuration
from config.settings import DIFY_CONFIG, RAGFLOW_CONFIG, DATABASE_CONFIG

# Initialise the logging system
setup_logging()

# Initialise the database manager
db_manager = DatabaseManager(DATABASE_CONFIG)

# Dify service base URL and API key
dify_server = DIFY_CONFIG.get('server')
dify_apikey = DIFY_CONFIG.get('apikey')

# RAGFlow service base URL, API key and dataset id
ragflow_server = RAGFLOW_CONFIG.get('server')
ragflow_apikey = RAGFLOW_CONFIG.get('apikey')
# NOTE(review): name keeps the historical "dateset" typo on purpose —
# renaming would break every reference in this module
ragflow_dateset_id = RAGFLOW_CONFIG.get('dataset_id')

# Content-Type header values used by the request helpers below
Content_Type_Enum = {
  "json": "application/json",
}

# Request chunk annotation from the Dify workflow
def req_get_mark (chunk_item):
  """Call the Dify chat-messages API (blocking mode) to annotate one chunk.

  Args:
    chunk_item: dict carrying at least 'doc_name', 'chunk_id', 'chunk_text'.

  Returns:
    The parsed JSON annotation (dict) on success, or None when the answer
    is empty, unparseable, or the HTTP request fails.
  """
  # Request URL
  url = f"{dify_server}/chat-messages"
  # Request payload
  data = {
    "query": f"开始提取：{ chunk_item.get('doc_name') } - { chunk_item.get('chunk_id') }",
    "inputs": {
      "doc_name": chunk_item.get('doc_name'),
      "chunk_text": chunk_item.get('chunk_text'),
    },
    "response_mode": "blocking",
    "conversation_id": "",
    "user": "api-interface",
    "files": []
  }
  # Request headers
  headers = {
    'Authorization': f'Bearer {dify_apikey}',
    "Content-Type": Content_Type_Enum.get('json')
  }
  # Record start time for the duration log below
  start_time = time.time()
  print(f'提取知识库文档分块标注：【开始】分片ID：{ chunk_item.get("chunk_id") }')
  try:
    # Fix: add a timeout so a hung blocking call cannot stall the whole batch.
    # Generous (10 min) because LLM extraction can be slow; a timeout surfaces
    # as requests.RequestException and is handled below.
    response = requests.post(url, headers=headers, json=data, timeout=600)
    # Raise for non-2xx status codes
    response.raise_for_status()
    # Parse the response JSON
    response_data = response.json()
    answer = response_data.get('answer', '')
    # The workflow may literally answer the string "null" for "nothing found"
    if answer and answer != "null":
      try:
        answer = clear_dify_response(answer)
        # Convert the cleaned answer into JSON
        answer_json = json.loads(answer)
        print(f"提取知识库文档分块标注：【成功】【返回值不为空】，执行耗时: {(time.time() - start_time):.2f} 秒")
        return answer_json
      except json.JSONDecodeError as e:
        print(f"提取知识库文档分块标注：解析数据失败，{e}")
        return None
    else:
      print(f"提取知识库文档分块标注：【成功】【返回值为空】，执行耗时: {(time.time() - start_time):.2f} 秒")
      return None
  except requests.RequestException as e:
    print(f"提取知识库文档分块标注异常: {e}")
    return None

# Fetch the knowledge-base document list
def req_get_docs (dataset_id):
  """List documents in a RAGFlow dataset, keeping only enabled, parsed ones.

  Args:
    dataset_id: RAGFlow dataset identifier.

  Returns:
    List of document dicts; empty list on any API or network failure.
  """
  # Request URL
  url = f"{ragflow_server}/datasets/{dataset_id}/documents"
  # Query parameters
  params = {
    "id": "",
    "page": 1,
    # NOTE(review): page_size=1 fetches at most ONE document per run — this
    # looks like incremental processing by design, but confirm it is not a
    # leftover test value.
    "page_size": 1,
  }
  # Request headers
  headers = {
    'Authorization': f'Bearer {ragflow_apikey}',
    'Content-Type': Content_Type_Enum.get('json')
  }

  docs = []
  try:
    print(f"查询知识库文档列表数据：{dataset_id}")
    # Fix: add a timeout so the listing call cannot hang indefinitely
    response = requests.get(url, headers=headers, params=params, timeout=60)
    # Raise for non-2xx status codes
    response.raise_for_status()
    # Parse the response body
    data = response.json()
    if data.get('code') != 0:
      docs = []
      print(f"查询知识库文档列表失败: {data.get('message')}")
    else:
      docs = data.get('data', {}).get('docs', [])
      print(f"知识库文档列表原始数据：{len(docs)} 条数据")
      # Keep only documents that are enabled (status '1') and fully parsed (run DONE)
      docs = [doc for doc in docs if doc.get('status') == '1']
      docs = [doc for doc in docs if doc.get('run') == 'DONE']
      print(f"知识库文档列表过滤数据：{len(docs)} 条数据")
  except requests.exceptions.RequestException as e:
    print(f"查询知识库文档列表失败: {e}")
    docs = []
  return docs

# Fetch the chunk list of one knowledge-base document
def req_get_doc_chunks (doc_id, dataset_id):
  """List the chunks of a RAGFlow document, keeping only available ones.

  Args:
    doc_id: RAGFlow document identifier.
    dataset_id: RAGFlow dataset identifier.

  Returns:
    List of chunk dicts; empty list on any API or network failure.
  """
  # Request URL
  url = f"{ragflow_server}/datasets/{dataset_id}/documents/{doc_id}/chunks"
  # Query parameters (single page of up to 500 chunks)
  params = {
    "page": 1,
    "page_size": 500,
  }
  # Request headers
  headers = {
    'Authorization': f'Bearer {ragflow_apikey}',
    'Content-Type': Content_Type_Enum.get('json')
  }

  chunks = []
  try:
    print(f"查询知识库文档分块数据: {doc_id}")
    # Fix: add a timeout so the chunk-listing call cannot hang indefinitely
    response = requests.get(url, headers=headers, params=params, timeout=60)
    # Raise for non-2xx status codes
    response.raise_for_status()
    # Parse the response body
    data = response.json()
    if data.get('code') != 0:
      chunks = []
      print(f"查询知识库文档分块数据失败: {data.get('message')}")
    else:
      chunks = data.get('data', {}).get('chunks', [])
      print(f"知识库文档分块原始数据：{len(chunks)} 条数据")
      # Keep only chunks flagged available (== True also matches 1; kept as-is)
      chunks = [chunk for chunk in chunks if chunk.get('available') == True]
      print(f"知识库文档分块过滤数据：{len(chunks)} 条数据")
  except requests.exceptions.RequestException as e:
    print(f"查询知识库文档分块数据失败: {e}")
    chunks = []
  return chunks

# Normalise a raw Dify answer into plain JSON text
def clear_dify_response (llm_res: str):
  """Strip <think> sections, leading newlines and markdown fence markers.

  Returns the literal string "null" when the input is None.
  """
  if llm_res is None:
    return "null"
  # Drop any <think ...>...</think> reasoning section (across newlines)
  cleaned = re.sub(r'<think[^>]*>.*?</think>', '', llm_res, flags=re.DOTALL)
  # Trim newline characters at the very start
  cleaned = cleaned.lstrip('\n')
  # Peel off markdown code-fence markers, one variant at a time
  for marker in ("```json\n", "json\n", "\n```"):
    cleaned = cleaned.replace(marker, "")
  return cleaned

# Annotate every chunk of a document and build the persistence SQL
def deal_doc_chunks_data (chucks, doc):
  """Annotate each available chunk via Dify and assemble SQL statements.

  Args:
    chucks: list of RAGFlow chunk dicts (parameter name kept for
      backward compatibility with existing callers).
    doc: RAGFlow document dict providing 'id' and 'name'.

  Returns:
    dict with 'sqls' (interleaved DELETE+INSERT statements) and
    'chunks' (annotated items without the raw chunk text).
  """
  new_sqls = []
  new_chunks = []
  print(f'====== 遍历知识库文档分片数据【开始】({doc.get("name")})[{doc.get("id")}] ======')
  # Record start time for the duration log below
  start_time = time.time()
  for chunk in chucks:
    # Skip chunks explicitly flagged unavailable
    if chunk.get('available') == False:
      continue
    new_item = {
      "doc_id": doc.get('id'),
      "chunk_id": chunk.get('id'),
      "doc_name": doc.get('name'),
      "dataset_id": chunk.get('dataset_id'),
      "chunk_text": chunk.get('content', ''),
    }

    # Ask Dify for the annotation; skip the chunk when nothing comes back
    mark = req_get_mark(new_item)
    if mark is None:
      continue
    mark_info = mark.get('children', [])
    # 'children' can be an explicit null in the LLM answer
    if mark_info is None:
      continue
    # Assemble "etype(type)" entries
    etype_arr = []
    for child in mark_info:
      # Fix: a child missing 'etype'/'type' previously raised KeyError and
      # aborted the whole document — skip malformed entries instead
      etype = child.get('etype')
      ctype = child.get('type')
      if etype is None or ctype is None:
        continue
      etype_arr.append(f"{etype}({ctype})")
    if len(etype_arr) == 0:
      continue
    etype_content = ", ".join(etype_arr)
    # Attach annotation fields
    new_item['province'] = mark.get('province', '')
    new_item['city'] = mark.get('city', '')
    new_item['district'] = mark.get('district', '')
    new_item['content'] = etype_content

    # Build SQL (delete-then-insert keeps the operation idempotent per chunk)
    sql_data = generate_chunk_mark_sql(new_item)
    new_sqls.append(sql_data.get('delete_sql'))
    new_sqls.append(sql_data.get('insert_sql'))
    # Drop the bulky raw text before returning the summary item
    del new_item['chunk_text']
    new_chunks.append(new_item)

  print(f'====== 遍历知识库文档分片数据【结束】({doc.get("name")})(耗时: {(time.time() - start_time):.2f} 秒) ======')
  return {
    "sqls": new_sqls,
    "chunks": new_chunks
  }

# Build the SQL statements for one annotated chunk
def generate_chunk_mark_sql(item):
  """Build DELETE + INSERT statements for the DSP_CHUNKS_EXTRACT table.

  Fix: values are embedded as SQL string literals, so a single quote coming
  from LLM output (e.g. in the extracted content) previously broke the
  statement and opened an injection hole; quotes are now escaped by doubling.
  The downstream executor takes raw SQL strings, so parameterisation is not
  available at this layer.

  Args:
    item: annotated chunk dict ('province', 'city', 'district', 'doc_id',
      'chunk_id', 'dataset_id', 'content').

  Returns:
    dict with 'insert_sql' and 'delete_sql' statement strings.
  """
  def _esc(value):
    # Escape embedded single quotes for a SQL string literal
    return str(value).replace("'", "''")

  file_year = datetime.now().year
  insert_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  # Target table
  table_name = "DSP_CHUNKS_EXTRACT"
  # Build the INSERT statement
  insert_sql = f"INSERT INTO {table_name} (EXTRACT_ID, PROVINCE, CITY, DISTRICT, DOC_ID, CHUNK_ID, DATASET_ID, EXTRACT_CONTENT, FILE_YEAR, CREATED_TIME, UPDATED_TIME) VALUES"
  insert_sql = f"{insert_sql} ('{generate_uppercase_uuid()}', '{_esc(item['province'])}','{_esc(item['city'])}','{_esc(item['district'])}','{_esc(item['doc_id'])}', '{_esc(item['chunk_id'])}', '{_esc(item['dataset_id'])}', '{_esc(item['content'])}', '{file_year}', '{insert_time}', '{insert_time}');"
  # Build the DELETE statement (clears any previous annotation for the chunk)
  delete_sql = f"DELETE FROM {table_name} WHERE DOC_ID = '{_esc(item['doc_id'])}' AND CHUNK_ID = '{_esc(item['chunk_id'])}';"
  return {
    "insert_sql": insert_sql, "delete_sql": delete_sql
  }

# Persist the annotation data for one document
def insert_data_to_databases(doc_chunk, doc_id):
  """Commit the annotation SQL statements for one document.

  Fix: removed the unreachable HTTP-fallback block that followed
  `return True` (dead code — it also posted with `data=` instead of
  `json=`, so it could never have worked as intended). If an HTTP insert
  path is needed again, add it behind a configuration switch.

  Args:
    doc_chunk: dict with a 'sqls' list as produced by deal_doc_chunks_data.
    doc_id: document identifier, used only for logging.

  Returns:
    True (errors, if any, surface from the database manager itself).
  """
  print(f"插入标注数据：{doc_id}")
  # db_type: mysql,oracle,polardb
  db_manager.execute_sqls_commit(sqls=doc_chunk.get('sqls'), db_type='mysql')
  return True

# Write the processing result to a JSON file
def write_result_to_file(content, dir_name: str = 'result', file_name: str = 'chunks.json'):
  """Serialise `content` as pretty-printed JSON under <script dir>/<dir_name>.

  Args:
    content: any JSON-serialisable object.
    dir_name: output directory name, relative to this script's directory.
    file_name: output file name.

  Returns:
    True on success, False on any failure (logged, never raised).
  """
  try:
    # Build the output directory path next to this script
    file_dir = os.path.join(os.path.dirname(__file__), dir_name)
    # Fix: exist_ok avoids the check-then-create race of the previous
    # os.path.exists + os.makedirs pair
    os.makedirs(file_dir, exist_ok=True)

    # Build the full file path and dump the JSON
    file_path = os.path.join(file_dir, file_name)
    with open(file_path, 'w', encoding='utf-8') as file:
      json.dump(content, file, ensure_ascii=False, indent=4)
      print(f"成功写入文件: {file_name}")
    return True
  except Exception as e:
    print(f"写入文件失败: {str(e)}")
    return False


if __name__ == '__main__':
  # Record overall start time
  start_time = time.time()
  dataset_id = ragflow_dateset_id
  # Fetch the knowledge-base document list
  docs = req_get_docs(dataset_id)
  print('====== 开始遍历知识库文档数据 ======')
  
  # Execution-record file: one "<dataset_id>-<doc_id>" line per processed doc
  # (file name keeps the historical "execut" typo — renaming would orphan old records)
  record_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'note')
  record_file = os.path.join(record_dir, 'execut-record.txt')
  
  # Make sure the record directory exists
  if not os.path.exists(record_dir):
    os.makedirs(record_dir)
    print(f"创建记录目录: {record_dir}")
  
  # Load the set of already-processed records
  executed_records = set()
  if os.path.exists(record_file):
    try:
      with open(record_file, 'r', encoding='utf-8') as f:
        for line in f:
          line = line.strip()
          if line:
            executed_records.add(line)
      print(f"已读取 {len(executed_records)} 条执行记录")
    except Exception as e:
      print(f"读取执行记录失败: {e}")
  
  # Counters for the summary printed at the end
  skipped_count = 0
  processed_count = 0
  # Iterate over the knowledge-base documents
  for doc in docs:
    doc_id = doc.get('id')
    doc_name = doc.get('name')
    # Record key identifying this (dataset, document) pair
    record_key = f"{dataset_id}-{doc_id}"
    
    # Skip documents already processed in a previous run
    if record_key in executed_records:
      print(f"文档【{doc_name} ({doc_id}) 】已经执行过，跳过处理")
      skipped_count += 1
      continue
    
    # Process this document
    print(f"====== 开始处理文档: {doc_name} ({doc_id})")
    # Fetch the document's chunk list
    chunks = req_get_doc_chunks(doc_id, dataset_id)
    # Annotate the chunks via Dify and build the persistence SQL
    doc_chunck = deal_doc_chunks_data(chunks, doc)
    # Commit the annotation data
    insert_data_to_databases(doc_chunck, doc_id)
    # Dump the result to a JSON file
    write_result_to_file(doc_chunck, 'result', f'[{doc_name}]-chunks.json')
    
    # Append this document to the execution record
    try:
      with open(record_file, 'a', encoding='utf-8') as f:
        f.write(f"{record_key}\n")
      print(f"记录文档执行: {record_key}")
      processed_count += 1
    except Exception as e:
      print(f"记录文档执行失败: {e}")
  
  print(f"本次处理文档: {processed_count} 个文档，跳过: {skipped_count} 个文档")
  print(f"成功完成知识库文档分块标注，总耗时: {(time.time() - start_time):.2f} 秒")
