from neo4j import GraphDatabase
import warnings
from datetime import datetime
import math
import logging
import multiprocessing
import time
import os
# Logging configuration
from loguru import logger

from util_upload_to_dify import *
logger.add("for-debug-neo4j-calculate.log", level="DEBUG")

#logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

#warnings.filterwarnings("ignore", category=UserWarning)

# Factory for Neo4j database driver objects
def create_driver():
    """Return a Neo4j driver connected to the project graph database.

    NOTE(review): connection details are hard-coded; consider moving them
    to environment variables or a config file.
    """
    bolt_uri = "bolt://119.254.155.85:17687"  # Neo4j database address
    credentials = ("neo4j", "vPn**12@@56")    # (username, password)
    return GraphDatabase.driver(bolt_uri, auth=credentials)

class ApplicationDateException(Exception):
    """Raised when a patent node has no 申请日 (application date) relation."""
    pass

class PatentAllCountException(Exception):
    """Raised when the total patent count in the database is not positive."""
    pass

class KeywordsException(Exception):
    """Raised when a patent node has no 关键词 (keyword) relations.

    This exception is raised by calculate_individual_metrics but was never
    defined, so the no-keywords path previously died with a NameError
    instead of the intended domain exception.
    """
    pass

# observation_time: fixed observation date used in the S1 novelty decay
def calculate_individual_metrics(patent_name,driver,handled_records,error_records):
    """Compute importance metrics S1-S5 for one patent and persist them to Neo4j.

    Metrics:
        S1 technology novelty: exp(-0.002 * days between the application date
           and the fixed observation date 2023-12-31) — larger means newer.
        S2 topic novelty: fraction of all patents sharing >=1 keyword that were
           applied for in 2021-2023 — smaller means more novel.
        S3 topic coverage: fraction of all patents sharing >=1 keyword.
        S4 technical influence: fraction of patents one keyword-hop away.
        S5 technology persistence: keyword-year activity over 2021-2023,
           normalized by (3 years * number of active keywords).

    Parameters:
        patent_name: `name` property of the 专利 (patent) node to score.
        driver: neo4j Driver used to open a session.
        handled_records: shared list recording every attempted patent (or None).
        error_records: shared list recording failed patents (or None).

    Returns:
        (patent_name, metrics) on success, where metrics maps the patent name
        to its S1..S5, importance_score and supporting figures;
        (patent_name, {"error": str(e)}) on failure.
    """
    pid_record = os.getpid()
    metrics = {}
    time_start = time.time()
    with driver.session() as session:
        try:
            if handled_records is not None:
                handled_records.append(patent_name)

            time_start_ = time.time()
            # Total patent count — denominator for S2/S3/S4.
            query_patent_count = "MATCH (n:专利) RETURN count(n) AS patent_count"
            result_patent_count = session.run(query_patent_count)
            patent_count = result_patent_count.single()["patent_count"]
            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},patent_count cost {time_cost} ms")
            if patent_count<=0:
                logger.error(f"PID {pid_record} all patent count is {patent_count}.")
                raise PatentAllCountException(patent_name)

            # Application date of this patent (needed for S1). Parameterized
            # to avoid Cypher injection through patent_name.
            query_1 = """
            MATCH (n:专利{name: $patent_name})-[:专利_申请日]->(m:申请日)
            RETURN m.name AS application_date limit 1
            """
            result_1 = session.run(query_1, patent_name=patent_name)
            application_date = result_1.single()
            if application_date is None:
                logger.error(f"PID {pid_record} No application date found for patent {patent_name}.")
                raise ApplicationDateException(patent_name)
            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},application date cost {time_cost}ms")

            # Keywords of this patent (needed for S5). Materialize the result
            # immediately: a neo4j Result object is always truthy, so the
            # previous `if not keywords` emptiness check could never fire, and
            # the Result could be invalidated by the queries run below on the
            # same session before the S5 loop consumed it.
            query_keywords = """
            MATCH (p:专利 {name: $patent_name})-[:`专利_关键词`]->(k:关键词)
            RETURN k.name AS keyword
            """
            keywords = [record["keyword"] for record in session.run(query_keywords, patent_name=patent_name)]
            if not keywords:
                logger.error(f"PID {pid_record} No keywords found for patent {patent_name}.")
                raise KeywordsException(patent_name)

            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},keywords cost {time_cost}ms")

            # S1 technology novelty — larger means newer.
            application_date_str = application_date["application_date"]
            application_date_str = application_date_str.split('T')[0]  # keep the date part only
            application_date = datetime.strptime(application_date_str, "%Y-%m-%d")
            # Fixed observation date: 2023-12-31.
            observation_time = datetime(2023, 12, 31)
            time_diff = (observation_time - application_date).days  # t - t_i in days

            S1 = math.exp(-0.002 * time_diff)
            logger.trace(f"PID {pid_record} calculated S1 for {patent_name}")

            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},s1 cost {time_cost}ms")

            # S2 topic novelty — smaller means more novel. Share of patents in
            # 2021-2023 having at least one keyword in common with this one.
            # NOTE(review): the date filter is a plain string comparison against
            # values like 'XXXX-XX-XXT00:00:00Z'; '2023-12-31T00:00:00Z' sorts
            # AFTER '2023-12-31', so patents applied exactly on 2023-12-31 are
            # excluded from the window — confirm this is intended.
            query_2 = """
                      MATCH (p:专利{name: $patent_name})-[:`专利_关键词`]->(k:关键词)
                      <-[:`专利_关键词`]->(relatedPatent:专利)
                      -[:`专利_申请日`]->(applicationDate:申请日)
                      WHERE applicationDate.name >= '2021-01-01' AND applicationDate.name <= '2023-12-31'
                      RETURN COUNT(DISTINCT relatedPatent) AS related_patents_in_2021_2023
                      """
            result_2 = session.run(query_2, patent_name=patent_name)
            related_patents_in_2021_2023 = result_2.single()["related_patents_in_2021_2023"]
            # patent_count > 0 is guaranteed by the PatentAllCountException
            # check above; the duplicate re-check that used to sit here was
            # dead code and has been removed.
            S2 = related_patents_in_2021_2023 / patent_count
            logger.trace(f"PID {pid_record} calculated S2 for {patent_name}")
            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},s2 cost {time_cost}ms")

            # S3 topic coverage — larger means broader coverage.
            query_3 = """
                       MATCH (m:专利{name: $patent_name})-[:`专利_关键词`]->(k:关键词)<-[:`专利_关键词`]->(m2:专利)
                       WHERE m2.name <> $patent_name
                       RETURN COUNT(DISTINCT m2) AS patents_with_same_keywords
                       """
            result_3 = session.run(query_3, patent_name=patent_name)
            patents_with_same_keywords = result_3.single()["patents_with_same_keywords"]
            S3 = patents_with_same_keywords / patent_count
            logger.trace(f"PID {pid_record} calculated S3 for {patent_name}")
            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},s3 cost {time_cost}ms")

            # S4 technical influence — larger means more influential.
            # NOTE(review): this query is structurally identical to S3's (only
            # variable names differ), so S3 == S4 for every patent — confirm
            # that is intended.
            query_4 = """
                        MATCH (p:专利{name: $patent_name})-[:`专利_关键词`]->(k:关键词)<-[:`专利_关键词`]->(related:专利)
                        WHERE related.name <> $patent_name
                        RETURN COUNT(DISTINCT related) AS patents_related_by_one_degree
                        """
            result_4 = session.run(query_4, patent_name=patent_name)
            patents_related_by_one_degree = result_4.single()["patents_related_by_one_degree"]
            S4 = patents_related_by_one_degree / patent_count
            logger.trace(f"PID {pid_record} calculated S4 for {patent_name}")
            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},s4 cost {time_cost}ms")

            # S5 technology persistence: for every keyword, count the years in
            # 2021-2023 in which it appears, sum over keywords, then divide by
            # (year span * number of keywords with any activity).
            S5_total = 0
            N = 0  # number of keywords with activity in the window
            T = 3  # years 2021-2023
            keyword_count = 0  # total keywords examined (logging only)
            # Query text is loop-invariant; hoisted out of the loop.
            query_years = """
                      MATCH (k:关键词 {name: $keyword_name})-[:`关键词_年份标签`]->(y:年份标签)
                      WHERE y.name >= '2021' AND y.name <= '2023'
                      RETURN y.name AS year, COUNT(*) AS count
                      """
            for keyword_name in keywords:
                years = session.run(query_years, keyword_name=keyword_name)
                # One point per year in which this keyword occurs.
                c_it_sum = sum(1 for year in years if year["count"] > 0)

                if c_it_sum > 0:
                    N += 1

                S5_total += c_it_sum
                keyword_count += 1

            S5 = S5_total / (T * N) if N > 0 else 0

            logger.trace(f"PID {pid_record} calculated S5 for {patent_name}")
            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},length {keyword_count},s5 cost {time_cost}ms")

            # Overall importance: unweighted mean of the five metrics.
            importance_score = (S1 + S2 + S3 + S4 + S5) / 5
            logger.trace(f"PID {pid_record} calculated importance_score for {patent_name}")

            # Persist the metrics on the patent node (parameterized SET — the
            # previous f-string interpolation was injectable via patent_name).
            update_query = """
            MATCH (p:专利 {name: $patent_name})
            SET p.S1 = $S1, p.S2 = $S2, p.S3 = $S3, p.S4 = $S4, p.S5 = $S5, p.importance_score = $importance_score
            """
            session.run(update_query, patent_name=patent_name,
                        S1=S1, S2=S2, S3=S3, S4=S4, S5=S5,
                        importance_score=importance_score)
            time_end_ = time.time()
            time_cost = 1000*(time_end_-time_start_)
            time_start_ = time.time()
            logger.debug(f"PID {pid_record} processing {patent_name},store cost {time_cost}ms")

            # Collect the metrics plus supporting figures for the caller.
            metrics[patent_name] = {
                "S1": S1,
                "S2": S2,
                "S3": S3,
                "S4": S4,
                "S5": S5,
                "importance_score": importance_score,
                "application_date": application_date_str,
                "patent_count": patent_count,
                "patents_related_by_one_degree": patents_related_by_one_degree,
            }

        except Exception as e:
            # Best-effort error accounting: record the failure and return an
            # error payload instead of propagating.
            if error_records is not None:
                error_records.append(patent_name)
                len_error = len(error_records)
                logger.error(f"PID {pid_record} Error processing patent {patent_name}, current error records length {len_error}")
            time_end = time.time()
            cost_time = 1000*(time_end-time_start)
            logger.error(f"PID {pid_record} Error processing patent {patent_name}: {e}, cost {cost_time}ms")
            import traceback
            traceback.print_exc()
            logger.error("PID {} patent {} excetion format {}.".format(pid_record,patent_name,traceback.format_exc()))
            return patent_name, {"error": str(e)}
    time_end = time.time()
    cost_time = 1000*(time_end-time_start)
    logger.debug(f"PID {pid_record} calculating patent {patent_name} all cost {cost_time}ms.")
    return patent_name, metrics

class multiProcessingHandler(multiprocessing.Process):
    """Worker process that scores one page of patents.

    Opens its own Neo4j driver, fetches up to `page_size` patent names
    starting at offset `from_`, and runs calculate_individual_metrics on
    each one sequentially.
    """
    def __init__(self,from_,page_size,c_number,handled_records,error_records,rebuild_flag=False):
        # from_           : SKIP offset of this worker's page
        # page_size       : LIMIT of this worker's page
        # c_number        : 1-based page ("circle") index, used only in logs
        # handled_records : shared Manager list of attempted patents (or None)
        # error_records   : shared Manager list of failed patents (or None)
        # rebuild_flag    : True -> recompute every patent; False -> only
        #                   patents whose importance_score is still null
        multiprocessing.Process.__init__(self)
        self.from_ = from_
        self.page_size= page_size
        self.c_number = c_number
        self.handled_records= handled_records
        self.error_records = error_records
        self.rebuild_flag = rebuild_flag
    def run(self):
        driver = create_driver()  # each worker gets its own driver/connection
        patents = []
        self.pid_record = os.getpid()
        circle_number = self.c_number
        from_ = self.from_
        page_size= self.page_size
        logger.info(f"-->PID {self.pid_record} for from {from_},limit {page_size},current circle {circle_number}")
        with driver.session() as session:
            # Fetch this worker's page of patent names; when not rebuilding,
            # restrict to patents that have not been scored yet.
            # NOTE(review): with rebuild_flag=False the SKIP offset is applied
            # to a result set that shrinks as concurrent workers write scores,
            # so unprocessed patents can be skipped — confirm intended.
            if self.rebuild_flag:
                query_patents = f"""
                MATCH (p:专利)
                RETURN p.name AS patent_name
                SKIP {from_}
                LIMIT {page_size}
                """
            else:
                query_patents = f"""
                MATCH (p:专利)
                WHERE p.importance_score is null
                RETURN p.name AS patent_name
                SKIP {from_}
                LIMIT {page_size}
                """
            patent_records = session.run(query_patents)
            for patent in patent_records:
                patents.append(patent["patent_name"])
            records_length_ = len(patents)
            logger.info(f"execute query from {from_},limit {page_size},result length {records_length_},current circle {circle_number}")
        if not patents:
            logger.error(f"execute query patents length 0,current circle {circle_number}")
            return
        progress =0
        records_length_ = len(patents)
        # Score each patent in this page, logging per-patent progress/timing.
        for patent_name in patents:
            progress=progress+1
            logger.info(f"execute query from {from_},limit {page_size},current circle {circle_number},current progress {progress}/{records_length_} start.")
            time_r_s = time.time()
            calculate_individual_metrics(patent_name,driver,self.handled_records,self.error_records)
            time_r_e = time.time()
            time_r_cost = 1000*(time_r_e-time_r_s)
            logger.info(f"execute query from {from_},limit {page_size},current circle {circle_number},current progress {progress}/{records_length_} end, time cost {time_r_cost}ms.")
        driver.close()  # make sure the connection is released


# Process all patent data page by page
def process_all_patents(page_size,record_handled=None,record_error_to_retry=None,rebuild_flag=False):
    """Fan out importance-score calculation over all patents, one child
    process per page of `page_size` patents.

    Parameters:
        page_size: number of patents each worker process handles.
        record_handled: shared Manager list of attempted patent names, or None.
        record_error_to_retry: shared Manager list of failed names, or None.
        rebuild_flag: True -> recompute every patent; False -> only patents
            whose importance_score is still null.
    """
    driver = create_driver()  # driver used only for the initial count query
    patent_count = 0
    processes = []
    with driver.session() as session:
        if rebuild_flag:
             query_patent_count = """ MATCH (p:专利) RETURN count(p) AS patent_count """
        else:
             query_patent_count = """ MATCH (p:专利) WHERE p.importance_score is null RETURN count(p) AS patent_count """
        result_patent_count = session.run(query_patent_count)
        patent_count = result_patent_count.single()["patent_count"]
    driver.close()  # release the connection before forking workers
    # Create one worker per page first, then start them all together.
    circle_number = 0
    for from_ in range(0, patent_count, page_size):
        circle_number = circle_number + 1
        m = multiProcessingHandler(from_, page_size, circle_number,
                                   record_handled, record_error_to_retry, rebuild_flag)
        processes.append(m)
    # Fixed: tmp_count used to be (re)initialized inside the loop above, which
    # only worked by accident; it belongs before the start loop.
    tmp_count = 0
    for process in processes:
        tmp_count = tmp_count + 1
        logger.debug("Process {} Started.".format(tmp_count))
        process.daemon = True
        process.start()
    for process in processes:
        process.join()


class multiProcessingHandlerForDifyTransfer(multiprocessing.Process):
    """Worker process that uploads one page of patents to a Dify dataset.

    Reads patents ordered by importance_score (descending) with SKIP/LIMIT,
    uploads each node's properties via create_datasets_doc, then attaches
    metadata via update_doc_metadata (helpers from util_upload_to_dify).

    NOTE(review): relies on the module-level global `datasets_id` set in the
    __main__ block; this is only inherited under the 'fork' start method and
    would break under 'spawn' — confirm the deployment platform.
    """
    def __init__(self,from_,page_size,c_number):
        # from_    : SKIP offset of this worker's page
        # page_size: LIMIT of this worker's page
        # c_number : 1-based page index, used only for logging
        multiprocessing.Process.__init__(self)
        self.from_ = from_
        self.page_size= page_size
        self.c_number = c_number
    def run(self):
        driver = create_driver()
        api_key = get_api_key()
        self.pid_record = os.getpid()
        circle_number = self.c_number
        from_ = self.from_
        page_size= self.page_size
        logger.info(f"-->PID {self.pid_record} for from {from_},limit {page_size},current circle {circle_number}")
        result_patent_ = []
        with driver.session() as session:
            # This worker's page, highest importance_score first.
            query_patent_ = f"MATCH (n:专利) RETURN n order by n.importance_score desc SKIP {from_} LIMIT {page_size}"
            result_patent_ = session.run(query_patent_).data()
            patent_length = len(result_patent_)
        patent_length = len(result_patent_)
        logger.debug(f"PID {self.pid_record} processing length {patent_length}")
        progress_count = 0
        for patent_ in result_patent_:
            progress_count = progress_count +1
            time_start_ =time.time()
            doc_data = patent_['n']  # node properties (treated as a dict below)
            try:
                logger.trace('PID {} --------->{}'.format(self.pid_record,doc_data))
                if doc_data is None:
                    logger.error("PID {} doc_data None".format(self.pid_record,doc_data))
                    continue
                id_ =None
                get_=None
                # Display name: first non-empty of patent_name / title.
                for i in ['patent_name','title']:
                    get_ = doc_data.get(i,None)
                    if get_:
                        break
                name_patent_ = get_
                # Document id: first non-empty of several candidate id fields.
                for i in ["internal_id_elasticsearch","_id_elasticsearch","application_number","filename"]:
                    get_ = doc_data.get(i,None)
                    if get_:
                        break
                id_ = get_
                if id_:
                    name_patent_=name_patent_+"_编号_"+str(id_)
                catelogy = "neo4j"
                # Mirror the Elasticsearch identifiers into the keys the Dify
                # upload helpers expect.
                doc_data['_id'] = doc_data.get('_id_elasticsearch',None)
                doc_data['_index'] = doc_data.get('_index_elasticsearch',None)
                index_name = doc_data['_index']
                doc_data['internal_id_elasticsearch'] = doc_data.get('_id_elasticsearch',None)
                result = create_datasets_doc(datasets_id,catelogy,doc_data)
                if result is None or len(result)<=0 or result[0] is None:
                    logger.error('PID {} patent {} id {},上传dify失败'.format(self.pid_record,name_patent_,doc_data['_id']))
                    continue
                dify_id = result[0].get('document')['id']
                result = update_doc_metadata(datasets_id,dify_id,catelogy,doc_data,api_key)
                time_end_ = time.time()
                time_cost_ = 1000*(time_end_-time_start_)
                logger.debug('PID {} circle_number {} patent {} id {} 上传dify成功,dify id 为{},花费时间为{}ms, progress {}/{}.'.format(self.pid_record,circle_number,name_patent_,doc_data['_id'],
                    dify_id,time_cost_,progress_count,patent_length))
            except Exception as e:
                # NOTE(review): if the exception fires before name_patent_ is
                # assigned, this log call itself raises NameError — confirm.
                import traceback
                traceback.print_exc()
                logger.error("PID {} patent {} excetion format {}.".format(self.pid_record,name_patent_,traceback.format_exc()))
                continue
        driver.close()  # make sure the connection is released

if __name__ == "__main__":
    # Phase 1: compute importance metrics (S1-S5) for all patents in parallel.
    time_main_start = time.time()
    # Manager lists are shared across the worker processes.
    record_handled = multiprocessing.Manager().list()
    record_error_to_retry =  multiprocessing.Manager().list()
    rebuild_flag = True
    process_all_patents(page_size=2000,record_handled=record_handled,record_error_to_retry=record_error_to_retry,rebuild_flag=rebuild_flag)
    # Print the final summary.
    len_processed = len(record_handled)
    len_error = len(record_error_to_retry)

    len_processed_set = len(list(set(record_handled)))
    len_error_set = len(list(set(record_error_to_retry)))
    time_main_end = time.time()
    time_main_cost = 1000*(time_main_end-time_main_start)
    logger.info(f"Processed length : {len_processed}, Error length : {len_error}; 去重后 Processed length : {len_processed_set}, Error length : {len_error_set}，总花费时间{time_main_cost}ms.")

    # Phase 2: create (or look up) the Dify dataset, then upload the top-K
    # patents by importance_score in parallel pages.
    ds_name = "180项目-(2014年到2023年)-重点专利"
    to_result = create_datasets(ds_name)
    logger.info("创建知识库结果为 {}".format(to_result.status_code))
    datasets_id = None
    # 200 -> newly created; 409 -> already exists, find its id by name.
    if to_result.status_code==200:
        datasets_id = to_result.json()['id']
    elif to_result.status_code == 409:
        datasets = list_datasets()
        for ds in datasets:
            if ds['name'] == ds_name:
                datasets_id = ds['id']
                break
    # NOTE(review): datasets_id may still be None here (any other status
    # code); the calls below would then fail — confirm intended handling.
    logger.info("创建知识库ID为 {}".format(datasets_id))
    update_datasets(datasets_id,permission="all_team_members",model_type="local",api_key=get_api_key())
    driver = create_driver()  # driver used only for the total-count query
    circle_number = 0
    processes = []
    patent_count =0
    with driver.session() as session:
        query_patent_count = f""" MATCH (p:专利) RETURN count(p) AS patent_count """
        result_patent_count = session.run(query_patent_count)
        patent_count = result_patent_count.single()["patent_count"]
    driver.close()  # release the connection before forking workers
    # Transfer the top 20% of patents (by importance_score) to Dify.
    topK = math.floor(patent_count*20/100)
    page_size = 2000
    logger.info("获取topK {} 按page_size {} 传递到dify.".format(topK,page_size))
    for i in range(0, topK, page_size):
        circle_number = circle_number+1
        from_ = i
        # Clamp the final page so the total does not exceed topK.
        step_size = page_size
        if circle_number*page_size > topK:
            step_size = topK-(circle_number-1)*page_size
        # Create one child process per page; all are started together below.
        m = multiProcessingHandlerForDifyTransfer(from_,step_size,circle_number)
        processes.append(m)
        # NOTE(review): tmp_count is re-initialized on every iteration of this
        # loop; it only works because the last iteration leaves it at 0 —
        # likely meant to sit just before the start loop below.
        tmp_count = 0
    for process in processes:
        tmp_count=tmp_count+1
        logger.debug("Process {} Started.".format(tmp_count))
        process.daemon = True
        process.start()
    for process in processes:
        process.join()
    logger.info("获取的topK {} 已传递到dify.".format(topK))
