import requests
import json
import re
from requests.auth import HTTPBasicAuth
import json
import time
import sys,os

from elasticsearch import Elasticsearch
from elasticsearch import helpers

from loguru import logger
from datetime import datetime,timedelta

logger.add("for-debug-dify.log", level="DEBUG")
import sys,os

from multiprocessing import Pool
from multiprocessing import Manager
import multiprocessing
from loguru import logger


class UnAuthorizationException(Exception):
    """Raised when a Dify API call cannot be authorized (e.g. no token returned)."""

REQUEST_TIMEOUT = 60  # seconds; shared timeout for Dify HTTP requests

# Dify endpoints and credentials used throughout this script.
# SECURITY NOTE(review): the dataset API key and console password are
# hard-coded here; move them to environment variables or a secrets store
# before sharing or deploying this script.
settings = {
    "AI_API_URL_FOR_DATASETS":"http://192.168.1.5:38080/v1/datasets",
    "AI_API_KEY_FOR_DATASETS":"dataset-k5ASe6iCpmHKRTT2lzJGQLNd",
    "AI_API_URL_FOR_DOC_METADATA":"http://192.168.1.5:38080/console/api/datasets",
    "AI_API_USER_FOR_DOC_METADATA":"pixeli@qq.com",
    "AI_API_PASSWORD_FOR_DOC_METADATA":"vPn**123456",
    "AI_API_URL_FOR_LOGIN":"http://192.168.1.5:38080/console/api/login",
}

def get_api_key():
    """Log in to the Dify console API and return an access token.

    Returns:
        str: the ``access_token`` issued for the configured console user.

    Raises:
        requests.exceptions.RequestException: network failure or non-2xx status.
        UnAuthorizationException: login response carried no token.
    """
    api_user = settings['AI_API_USER_FOR_DOC_METADATA']
    api_password = settings['AI_API_PASSWORD_FOR_DOC_METADATA']
    api_login = settings['AI_API_URL_FOR_LOGIN']
    # No token exists yet at login time, so no Authorization header is sent
    # (the original sent a meaningless bare "Bearer" header).
    headers = {'Content-Type': 'application/json'}
    data = {"email": api_user, "password": api_password}
    logger.trace('start post for {}'.format(api_login))
    response = requests.post(api_login, headers=headers, json=data,
                             timeout=REQUEST_TIMEOUT)
    logger.trace('end post for {}'.format(api_login))
    response.raise_for_status()  # Check if request was successful
    payload = response.json().get('data') or {}
    access_token = payload.get('access_token')
    if not access_token:
        # The original chained .get('data').get('access_token') and would
        # raise AttributeError here; raise a meaningful exception instead.
        raise UnAuthorizationException('login succeeded but no access_token returned')
    return access_token

def create_datasets(name):
    """Create a Dify knowledge base (dataset) named *name*.

    Args:
        name: display name for the new dataset.

    Returns:
        requests.Response: raw response; callers inspect ``status_code``
        (200 = created, 409 = a dataset with this name already exists).
    """
    api_url = settings['AI_API_URL_FOR_DATASETS']
    api_key = settings['AI_API_KEY_FOR_DATASETS']

    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }
    data = {
        "name": name,
        # "only_me" would hide the dataset from the rest of the team.
        "permission": "all_team_members",
    }
    # BUGFIX: the original POST had no timeout and could hang indefinitely.
    return requests.post(api_url, json=data, headers=headers,
                         timeout=REQUEST_TIMEOUT)

def list_datasets():
    """List the first page (up to 20 entries) of Dify datasets.

    Returns:
        list[dict] | None: the ``data`` array from the ``/datasets`` listing.

    Raises:
        requests.exceptions.RequestException: network failure or non-2xx status.
    """
    api_url = settings['AI_API_URL_FOR_DATASETS']
    api_key = settings['AI_API_KEY_FOR_DATASETS']

    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }
    # Cleanup: the original built an unused payload dict, sent an empty JSON
    # body on a GET, and pasted paging into the URL; use query params instead.
    response = requests.get(api_url, headers=headers,
                            params={"limit": 20, "page": 1}, timeout=50)
    response.raise_for_status()  # Check if request was successful
    return response.json().get('data')

def update_datasets(datasets_id,permission="all_team_members",model_type=None,api_key=None):
    """Patch dataset settings via the Dify *console* API.

    Args:
        datasets_id: id of the dataset to update.
        permission: dataset visibility ("all_team_members" or "only_me").
        model_type: when "local", also pin the Ollama ``bge-m3`` embedding
            model and semantic-search retrieval settings.
        api_key: console access token (see ``get_api_key``) — note this is
            NOT the dataset API key used by the other functions.

    Returns:
        The ``data`` field of the JSON response (may be ``None``).

    Raises:
        requests.exceptions.RequestException: network failure or non-2xx status.
    """
    api_url = settings['AI_API_URL_FOR_DOC_METADATA']
    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }
    if model_type == "local":
        data = {
            "permission": permission,
            "indexing_technique": "high_quality",
            "embedding_model": "bge-m3:latest",
            "embedding_model_provider": "ollama",
            "embedding_available": True,
            'retrieval_model_dict': {'search_method': 'semantic_search', 'reranking_enable': False},
        }
    else:
        data = {
            "permission": permission,
            "indexing_technique": "high_quality",
        }

    request_url = f'{api_url}/{datasets_id}'
    response = requests.patch(request_url, headers=headers, json=data, timeout=50)
    response.raise_for_status()  # Check if request was successful
    return response.json().get('data')


def create_datasets_doc(dataset_id,catelogy,doc,only_name=False):
    """Push one ES document into a Dify dataset as a new text document.

    Args:
        dataset_id: target Dify dataset id.
        catelogy: category key into NAME_FIELD / ID_FIELD (e.g. "专利-180").
        doc: ES ``_source`` dict (the caller injects ``_id`` into it).
        only_name: when True, skip the id lookup and use the bare name.

    Returns:
        tuple: ``(json_response, False)`` on success, ``(None, True)`` on
        any request or JSON-parsing failure.
    """
    api_url = settings['AI_API_URL_FOR_DATASETS']
    api_key = settings['AI_API_KEY_FOR_DATASETS']

    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }

    # First non-empty id field for this category (unless only_name is set).
    id_ = None
    if not only_name:
        id_ = next((doc[f] for f in ID_FIELD[catelogy] if doc.get(f)), None)

    # First non-empty name/title field for this category.
    name_ = next((doc[f] for f in NAME_FIELD[catelogy] if doc.get(f)), None)
    if name_ is None:
        # BUGFIX: the original raised TypeError (None + str) when no name
        # field matched, and that exception is not caught below.
        name_ = ""
    if id_:
        name_ = name_ + "_编号_" + str(id_)

    data = {
        "name": name_,
        "text": doc.get('abstract',doc.get('中文摘要',""))+"\n"+doc.get('full_text',doc.get('正文',"")),
        "indexing_technique":"high_quality",
        # Hierarchical (parent/child) chunking: whole doc is the parent,
        # "@@@" separates segments, 1000-token chunks, 100-token overlap.
        "process_rule":{
            "rules":{
                "pre_processing_rules":[{"id":"remove_extra_spaces","enabled":True},{"id":"remove_urls_emails","enabled":False}],
                "segmentation":{"separator":"@@@","max_tokens":1000,"chunk_overlap":100},
                "parent_mode":"full-doc",
                "subchunk_segmentation":{"separator":"@@@","max_tokens":1000,"chunk_overlap":100}
            },
            "mode":"hierarchical"
        },
        "doc_form":"hierarchical_model",
        "doc_language":"Chinese"
    }
    request_url = f'{api_url}/{dataset_id}/document/create_by_text'
    try:
        logger.trace('start post for {}'.format(request_url))
        start_post = time.time()
        response = requests.post(request_url, headers=headers, json=data, timeout=REQUEST_TIMEOUT)
        post_cost = 1000 * (time.time() - start_post)
        logger.trace('end post for {},post cost {}ms'.format(request_url,post_cost))
        response.raise_for_status()  # Check if request was successful
        return response.json(), False
    except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
        logger.error("Exception {}, for name_ {}".format(e,name_))
        return None, True

def update_datasets_doc(dataset_id,doc_id,catelogy,doc,only_name=False):
    """Update an existing Dify document with fresh text from an ES document.

    Args:
        dataset_id: Dify dataset id containing the document.
        doc_id: Dify document id to update.
        catelogy: category key into NAME_FIELD / ID_FIELD.
        doc: ES ``_source`` dict.
        only_name: when True, skip the id lookup and use the bare name.

    Returns:
        tuple: ``(json_response, False)`` on success, ``(None, True)`` on
        any request or JSON-parsing failure.
    """
    api_url = settings['AI_API_URL_FOR_DATASETS']
    api_key = settings['AI_API_KEY_FOR_DATASETS']

    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }

    # First non-empty id field for this category (unless only_name is set).
    id_ = None
    if not only_name:
        id_ = next((doc[f] for f in ID_FIELD[catelogy] if doc.get(f)), None)

    # First non-empty name/title field for this category.
    name_ = next((doc[f] for f in NAME_FIELD[catelogy] if doc.get(f)), None)
    if name_ is None:
        # BUGFIX: the original raised TypeError (None + str) when no name
        # field matched, and that exception is not caught below.
        name_ = ""
    if id_:
        name_ = name_ + "_编号_" + str(id_)

    data = {
        "name": name_,
        "text": doc.get('abstract',doc.get('中文摘要',""))+"\n"+doc.get('full_text',doc.get('正文',"")),
        "indexing_technique":"high_quality",
        "process_rule": {"mode": "automatic"}
    }
    # BUGFIX: the original concatenation produced ".../document/<id>update-by-text"
    # (no slash before the action). NOTE(review): verify the exact endpoint
    # spelling (update-by-text vs update_by_text) against your Dify version.
    request_url = f'{api_url}/{dataset_id}/document/{doc_id}/update-by-text'
    try:
        logger.trace('start post for {}'.format(request_url))
        start_post = time.time()
        response = requests.post(request_url, headers=headers, json=data, timeout=REQUEST_TIMEOUT)
        post_cost = 1000 * (time.time() - start_post)
        logger.trace('end post for {},post cost {}ms'.format(request_url,post_cost))
        response.raise_for_status()  # Check if request was successful
        return response.json(), False
    except requests.exceptions.RequestException as e:
        logger.error(f"Request error: {e}")
        return None, True
    except json.JSONDecodeError as e:
        logger.error(f"JSON parse error: {e}")
        return None, True

# Category label -> backing Elasticsearch index name.
ES_INDEX_NAME = {
    "专利-180":"patent-180",
    "论文-180":"journal-180",
    "政策-180":"policies-180",
    "资讯-180":"news-180",
}

# Candidate name/title fields per category; the first non-empty field wins.
NAME_FIELD= {
    '专利-180':['patent_name','title'],
    "论文-180":["title","篇名"],
    "政策-180":["标题",'title'],
    "资讯-180":["标题","title"],
}

# Candidate unique-id fields per category; the first non-empty field wins.
ID_FIELD = {
    "专利-180":["internal_id_elasticsearch","application_number","filename"],
    "论文-180":["internal_id_elasticsearch","resource_id"],
    "政策-180":['internal_id_elasticsearch',"自增id"],
    "资讯-180":['internal_id_elasticsearch',"自增id"]
}

# One Manager serves all shared proxies. The original called
# multiprocessing.Manager() three times, spawning three separate manager
# server processes for no benefit.
_manager = multiprocessing.Manager()
g_subprocess_share_dict = _manager.dict()  # shared bookkeeping dict passed to workers
patent_all = _manager.list()               # names successfully pushed to Dify
patent_failed = _manager.list()            # names whose Dify push failed

class multiProcessingHandler(multiprocessing.Process):
    """Worker process: fetches one page of ES hits and pushes each into Dify.

    Relies on the module-level globals ``index_name`` and ``datasets_id``
    being set by the driver loop before ``start()`` is called (inherited by
    the child process via fork). Successful document names are appended to
    ``list_record``; failed ones to ``list_failed_record`` (Manager proxies).
    """

    def __init__(self,catelogy,search_key,from_,step_size,c_number,list_record,list_failed_record,g_subprocess_share_dict):
        multiprocessing.Process.__init__(self)
        self.catelogy = catelogy
        self.search_key = search_key
        self.from_ = from_            # ES pagination offset for this worker
        self.step_size = step_size    # page size (hits fetched by this worker)
        self.c_number = c_number      # worker sequence number (diagnostics only)
        self.list_record = list_record
        self.list_failed_record = list_failed_record
        self.g_subprocess_share_dict = g_subprocess_share_dict

    def run(self):
        self.pid_record = os.getpid()
        catelogy = self.catelogy
        search_key = self.search_key
        # Deterministic paging: sort by internal id, then slice
        # [from_, from_ + step_size) out of the matching documents.
        query = {
            'sort': [
                {"internal_id_elasticsearch": {"order": "desc", 'missing': "_last", "unmapped_type": "long"}},
            ],
            'track_scores': True,
            'track_total_hits': True,
            'query': {"exists": {"field": f'{search_key}'}},
            'size': self.step_size,
            'from': self.from_,
        }
        # SECURITY NOTE(review): hard-coded credentials and verify_certs=False;
        # both should come from configuration.
        es_instance = Elasticsearch(hosts=["https://119.254.155.105:19202"], request_timeout=3600,
                                    basic_auth=('elastic', 'EPPso10r8Ja'), verify_certs=False)
        response = es_instance.search(index=index_name, body=query)
        docs = response['hits']['hits']
        ai_failure_count = 0
        for doc in docs:
            try:
                logger.trace('--------->{}'.format(doc))
                doc_data = doc.pop('_source', None)
                # BUGFIX: the original subscripted doc_data BEFORE this None
                # check, so the guard could never fire; also the log call had
                # two args for one placeholder.
                if doc_data is None:
                    logger.error("PID {} doc_data None".format(self.pid_record))
                    break
                doc_data['_id'] = doc.get('_id')
                # First non-empty name/title field for this category.
                name_patent_ = None
                for field in NAME_FIELD[catelogy]:
                    if doc_data.get(field):
                        name_patent_ = doc_data[field]
                        break
                # (The original also fetched an unused console token and an
                # unused id field per document here — dead work, removed.)
                result = create_datasets_doc(datasets_id, catelogy, doc_data)
                if result is None or len(result) <= 0 or result[0] is None:
                    logger.error("PID {} doc {} name {} 被传递到Dify错误,search_key{}".format(self.pid_record,doc['_id'],name_patent_,search_key))
                    self.list_failed_record.append(name_patent_)
                    ai_failure_count = ai_failure_count + 1
                    continue
                dify_id = result[0].get('document')['id']
                self.list_record.append(name_patent_)
                logger.debug("PID {} doc {} name {} 被传递到Dify,Dify中存储的ID 为 {},search_key{}".format(self.pid_record,doc['_id'],name_patent_,dify_id,search_key))
            except Exception as e:
                # Broad catch keeps one bad document from killing the worker.
                logger.error("PID {} exception {} doc {}.".format(self.pid_record,e,doc['_id']))
                continue

# ---- Bootstrap: create the target Dify dataset, or look up an existing one ----
ds_name = "180-test-20250411"
to_result = create_datasets(ds_name)
logger.info("创建知识库结果为 {}".format(to_result.status_code))
datasets_id = None
if to_result.status_code == 200:
    datasets_id = to_result.json()['id']
elif to_result.status_code == 409:
    # Name already taken: find the existing dataset with the same name.
    for ds in list_datasets():
        if ds['name'] == ds_name:
            datasets_id = ds['id']
            break
if datasets_id is None:
    # BUGFIX: the original fell through with datasets_id=None and later
    # PATCHed ".../None"; abort early with a clear message instead.
    logger.error("Failed to create or locate dataset '{}', aborting.".format(ds_name))
    sys.exit(1)
logger.info("创建知识库ID为 {}".format(datasets_id))
update_datasets(datasets_id, permission="all_team_members", model_type="local", api_key=get_api_key())

# ---- Driver: for each tag/category pair, count ES hits and fan out workers ----
# SECURITY NOTE(review): hard-coded credentials and verify_certs=False.
es_instance = Elasticsearch(hosts=["https://119.254.155.105:19202"], request_timeout=3600, basic_auth=('elastic', 'EPPso10r8Ja'),verify_certs=False)
for search_tag in ['人参','碳纤维']:
    for catelogy in ['专利-180',"论文-180",'政策-180','资讯-180']:
        processes = []
        search_key = f'{search_tag}'+"_year_2019_to_2023"
        # size=0 query: only the total hit count is needed to plan paging below.
        query = {}
        query_ = {
            "exists": {
                "field":f'{search_key}'
            },
        }
        query['sort'] = [
                {"internal_id_elasticsearch":{"order": "desc",'missing':"_last","unmapped_type":"long"}},
        ]
        query['track_total_hits']=True
        query['query'] = query_
        query['size'] = 0 
        # NOTE: index_name is a module-level global that worker processes read.
        index_name=ES_INDEX_NAME[catelogy]
        response = es_instance.search(index=index_name, body=query)
        max_size = response['hits']['total']['value']
        step_size = 2000
        c_number = 0
        for i in range(0, max_size, step_size):
            c_number = c_number+1
            from_ = i
            # Create one subprocess per 2000-hit page; started and joined below.
            m = multiProcessingHandler(catelogy,search_key,from_,step_size,c_number,patent_all,patent_failed,g_subprocess_share_dict)
            processes.append(m)
        tmp_count = 0
        for process in processes:
            tmp_count=tmp_count+1
            logger.debug("Process {} Started.".format(tmp_count))
            process.daemon = True
            process.start()
        # Wait for every page of this tag/category to finish before moving on.
        for process in processes:
            process.join()
# Summarize the run: raw and de-duplicated counts of successes and failures.
total_names = len(patent_all)
unique_names = len(set(patent_all))
total_failed = len(patent_failed)
unique_failed = len(set(patent_failed))
logger.debug("Dict All {} Failed {} Names 长度 {},去重后 {};失败的 Names 长度 {},去重后 {} .".format(patent_all, patent_failed, total_names, unique_names, total_failed, unique_failed))

def find_duplicates(lst):
    """Return the distinct elements of *lst* that occur more than once.

    Result order is unspecified, matching the original set-based behaviour.
    Uses a single Counter pass (O(n)) instead of the original
    ``lst.count`` scan per element (O(n^2)).
    """
    from collections import Counter  # local import keeps this edit self-contained
    return [item for item, cnt in Counter(lst).items() if cnt > 1]
# Final diagnostics: duplicate names across workers, and every failed name.
logger.debug("Names 中重复元素为 {} .".format(find_duplicates(patent_all)))
logger.debug("失败的包括{} .".format(patent_failed))
