import json
import os
import time
import traceback

import requests
from loguru import logger
from openai import OpenAI
from sqlalchemy.orm import sessionmaker
from werkzeug.utils import secure_filename

from biz.bizUtils import QryByUnstructCitation
from conf import Config as myconf
from models.table_defs import init_tables, PaperAnalyzeMain, TaskQueue, PaperReference

from utils.DbHelper2023 import DbHelper
from utils.ai_proxy import AiProxy
from utils.comm1 import getYM, getYMDHmsRand, get_file_md5, update_attributes
from utils.comm_preemptive_task import CommPreemptiveTask

# Name of the DB table polled by CommPreemptiveTask for pending work.
Tbl_queue = "task_queue"
# Task-type tags; one pipeline stage each, executed in this order per paper:
C_extract_paper_references_task = "extract_paper_references_task"
C_gain_references_detail = "gain_references_detail"  # fetch reference details, incl. title, authors, journal, DOI, etc.
C_download_by_doi = "download_by_doi"


class main_mgr:
  """Manager for the paper-reference processing pipeline.

  Polls the ``task_queue`` table and dispatches three task types, run in
  sequence for each uploaded paper:

  1. ``extract_paper_references_task`` -- extract raw citation strings
     from the uploaded PDF via an AI prompt.
  2. ``gain_references_detail``        -- enrich each raw citation with
     structured details (title, authors, DOI, ...) via Crossref.
  3. ``download_by_doi``               -- request a download of each
     reference by DOI and record the receipt id.
  """

  def __init__(self):
    self.is_running = True  # polling-loop flag; cleared by stop_task()
    self.dbHelper = DbHelper(configFile0=myconf.main_db_config, db_type=DbHelper.DB_TYPE_SQLITE)
    self.commPreemptiveTask = CommPreemptiveTask(Tbl_queue, self.dbHelper, runner=self.task_runner,
                                                 biz_id_fld_name='cont')

    self.sessionmaker = sessionmaker(bind=self.dbHelper.get_enginee())
    self.ai_proxy = self.init_AI_proxy()
    self.sys_role_content = self.build_role_content()
    # init_tables(self.dbHelper.get_enginee())  # initialize DB schema; run only when needed

  def start_task(self):
    """Poll the preemptive task queue until stop_task() is called."""
    while self.is_running:
      self.commPreemptiveTask.call()
      time.sleep(5)  # interval between queue polls

  def stop_task(self):
    """Ask the loop in start_task() to exit after its current iteration."""
    self.is_running = False
    logger.info("set main_mgr.is_running = False to exit task loop")

  def build_role_content(self):
    """Return the system-role prompt sent with every AI request."""
    return """你是一个善于从文本中提取关键信息的人。"""

  def init_AI_proxy(self):
    """Build the AiProxy client from model/base_url/api_key in config."""
    ai_model = myconf.ai_conf['model']
    ai_client = OpenAI(base_url=myconf.ai_conf['base_url'], api_key=myconf.ai_conf['api_key'])
    return AiProxy(ai_client, ai_model)

  def insert_task_queue(self, mid, stype, req_cont):
    """Enqueue a follow-up task row for paper *mid*."""
    with self.sessionmaker() as session:
      session.add(TaskQueue(mid=mid, stype=stype, cont=req_cont))
      session.commit()

  def task_runner(self, biz_cont, task_id):
    """Dispatch one queued task.

    :param biz_cont: task payload of the form ``"<task_type>:<mid>"``.
    :param task_id: id of the queue row (logged only).
    :return: True when the stage succeeded; on success the next stage
             of the pipeline is enqueued.
    """
    logger.info(f"in task_runner, biz_cont={biz_cont}, task_id={task_id}")
    # fix: split once instead of re-splitting biz_cont in every branch
    parts = biz_cont.split(':')
    task_type = parts[0]
    if task_type == C_extract_paper_references_task:
      mid = int(parts[1])
      run_task_result = self.parse_paper_references(mid)
      if run_task_result:
        self.insert_task_queue(mid, C_gain_references_detail, f"{C_gain_references_detail}:{mid}")

    elif task_type == C_gain_references_detail:
      mid = int(parts[1])
      run_task_result = self.gain_references_detail(mid)
      if run_task_result:
        self.insert_task_queue(mid, C_download_by_doi, f"{C_download_by_doi}:{mid}")

    elif task_type == C_download_by_doi:
      mid = int(parts[1])
      run_task_result = self.download_by_doi(mid)

    else:
      logger.error(f"unknown task_type={task_type}")
      run_task_result = False

    return run_task_result

  def qry_paper_file_by_md5(self, file_md5):
    """Return the stored relative path of a paper with this MD5, or None."""
    with self.sessionmaker() as session:
      record = session.query(PaperAnalyzeMain).filter_by(file_md5=file_md5).first()
      return record.paper_file if record else None

  def add_paper_analyze(self, file_obj, ori_file_name, act="judge_reference"):
    """Save an uploaded paper, create its main record and first pipeline task.

    :param file_obj: uploaded file object supporting ``.save(path)``
                     (werkzeug FileStorage -- TODO confirm at caller).
    :param ori_file_name: original client file name; sanitized before use.
    :param act: kept for interface compatibility; currently unused.
    :return: the new PaperAnalyzeMain id, or None for a duplicate upload.
    """
    rela_path = os.path.join(getYM(), f"{getYMDHmsRand()}_{secure_filename(ori_file_name)}")
    file_path = os.path.join(myconf.upload_dir, rela_path)
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    file_obj.save(file_path)

    file_md5 = get_file_md5(file_path)
    if self.qry_paper_file_by_md5(file_md5):
      logger.info(f"upload file {ori_file_name} already exists in db, skip adding new record")
      os.remove(file_path)  # fix: don't leave an orphaned duplicate copy on disk
      return None

    with self.sessionmaker() as session:
      new_record = PaperAnalyzeMain(paper_file=rela_path, file_md5=file_md5, user_id=-1)
      session.add(new_record)
      session.commit()
      new_id = new_record.id

      new_task = TaskQueue(mid=new_id, stype=f"{C_extract_paper_references_task}",
                           cont=f"{C_extract_paper_references_task}:{new_id}")
      session.add(new_task)
      session.commit()
      return new_id  # fix: expose the new record id (was an implicit None)

  def extract_paper_references(self, pdf_file):
    """Ask the AI to pull raw citation strings from the paper's last page.

    :param pdf_file: absolute path to the stored PDF.
    :return: list of ``{"reference": ..., "content": ...}`` dicts parsed
             from the AI response, or None when the AI call fails.
    """
    from dl.download_ref_pdf import DRPdf
    last_page_text = DRPdf.pdf_to_txt(pdf_file, page_number=-1)  # text of the last page only

    ai_req = f"""{last_page_text} \n
任务：请从以上论文内容中，提取出原始的引用信息，以Json格式返回每个引用信息的非结构化内容，如以下所示：
[ 
  {{
    "reference": "1",
    "content": "xxx. xxx title  "
  }} ]
"""

    logger.info(f"ai_req=\n{ai_req}")
    result = self.ai_proxy.get_ai_response(ai_req)

    if not result or 'data' not in result:
      logger.error(f"ai_proxy get_ai_response failed, result={result}")
      return None
    return result['data']

  def parse_paper_references(self, mid):
    """Pipeline stage 1: extract raw references of paper *mid* and store them.

    :return: True on success (caller then enqueues stage 2), False otherwise.
    """
    try:
      with self.sessionmaker() as session:
        main_record = session.query(PaperAnalyzeMain).filter_by(id=mid).first()
        if not main_record:
          logger.error(f"can not find paper analyze main_record by id={mid}")
          return False

        file_path = os.path.join(myconf.upload_dir, main_record.paper_file)
        if not os.path.exists(file_path):
          logger.error(f"can not find paper file by path={file_path}")
          return False

        logger.info(f"start to parse paper file={file_path}")
        paper_references = self.extract_paper_references(file_path)
        logger.info(f"parse paper file={file_path} extract_paper_references = \n {paper_references}")

        if paper_references:
          # fix: reuse the outer session instead of opening a second,
          # shadowing sessionmaker() block inside the first one
          for ref in paper_references:
            session.add(PaperReference(paper_id=mid, doc_file=main_record.paper_file, ori_ref=ref['content']))
          session.commit()
          return True

        logger.error(f"parse paper file={file_path} failed")
        return False
    except Exception:
      logger.error(traceback.format_exc())
      return False

  def gain_references_detail(self, mid):
    """Pipeline stage 2: enrich stored raw citations via Crossref.

    Reads each ``ori_ref`` from paper_reference, queries Crossref for the
    structured details (title, authors, journal, DOI, ...) and copies them
    back onto the rows.

    :return: True on success, False on mismatch or error.
    """
    try:
      with self.sessionmaker() as session:
        rs = session.query(PaperReference).filter_by(paper_id=mid).all()
        if not rs:
          # fix: message wrongly referred to paper_analyze main_record
          logger.error(f"can not find paper references by paper_id={mid}")
          return False

        unstructured_citations = [r.ori_ref for r in rs]
        qdata_xml_str = QryByUnstructCitation.build_qdata_xml_str(unstructured_citations)
        paper_references = QryByUnstructCitation.query_4_PaperReference(qdata_xml_str)

        # Crossref answers must map 1:1 onto the stored rows.
        if len(paper_references) != len(rs):
          logger.error("The length of paper_references does not match the length of rs")
          return False

        for r, detail in zip(rs, paper_references):
          # NOTE: copying every attribute used to break the save because
          # "_"-prefixed private attributes were copied too; update_attributes
          # must therefore skip them.
          update_attributes(detail, r)  # e.g. authors/title/journal/resource/doi/year/pages

        session.commit()
        return True
    except Exception:
      logger.error(traceback.format_exc())
      return False

  def download_by_doi(self, mid):
    """Pipeline stage 3: request a download for each reference DOI.

    Calls the configured download API per DOI and stores the returned
    ``receipt_id`` on the reference row. Missing rows or missing DOIs are
    not treated as failures (returns True) so the task is not retried
    forever.

    :return: True on success/no-op, False on unexpected error.
    """
    try:
      api_url0 = myconf.download_by_doi_api_url
      with self.sessionmaker() as session:
        rs = session.query(PaperReference).filter_by(paper_id=mid).all()
        if not rs:
          # fix: message wrongly referred to paper_analyze main_record
          logger.warning(f"can not find paper references by paper_id={mid}")
          return True
        for r in rs:
          if not r.doi:
            # fix: a reference without a DOI made str.replace raise
            # TypeError, failing (and endlessly retrying) the whole task
            logger.warning(f"skip download for paper_id={mid}: reference has no doi")
            continue
          api_url = api_url0.replace('_doi_', r.doi)
          response = requests.get(api_url, timeout=30)  # fix: never hang without a timeout
          if response.status_code == 200:
            jres = response.json()
            if 'receipt_id' in jres:
              r.dl_receipt_id = jres['receipt_id']

        session.commit()
        return True
    except Exception:
      logger.error(traceback.format_exc())
      return False


"""
- 任务类型：paper_analyze_main 的业务要素：
  - paper_file：论文的文件(相对路径)
  - user_id：上传用户的id
  - upd_time：上传时间
  - task_id：处理此paper的任务id(对应 task_queue 表的 id 字段)
  
  - Reference：参考文献N条
    - doc_file : 文献的文档
    - title : 文献的标题
    - authors : 文献的作者
    - journal : 文献的期刊
    - issue : 文献的期号
    - volume : 文献的卷
    - year : 文献的出版年份
    - pages : 文献的页码
    - abstract : 文献的摘要
    - keywords : 文献的关键词
    - doi : 文献的唯一标识符
    - ori_ref : 原始的参考文献的信息
  
"""
