import os
import numpy as np
import h5py
import copy
from itertools import islice
from src.utils import tokenCounter
import json
from tqdm import tqdm
import random
import requests
import json
import re
from sympy import sympify, to_dnf, And, Or
from html2text import html2text
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock

# Module-level lock serializing writes to the merged_chunks dict that is
# shared across worker threads (see process_query_without_distance).
lock = Lock()


class database():
    """HTTP client for a paper-search service plus the BigModel rerank API.

    Retrieves paper chunks/titles for queries, deduplicates hits by
    (paper_id, chunk_id), scores them on occurrence count, recency,
    vector-search distance and rerank relevance, and returns them ordered
    by a weighted total score.
    """

    def __init__(self) -> None:
        # Base endpoint of the paper-search service.
        self.base_url = r'http://180.184.65.98:38880/atomgit/'
        self.token_counter = tokenCounter()

    def rerank(self, query, candidates):
        """Score every candidate document against `query` via the rerank API.

        Candidates are sent in batches of 10 from a thread pool; the query
        and each candidate are truncated to 3000 whitespace-separated tokens
        to stay under the API's input limit. Returns the concatenated
        `results` entries of all batches (batch order, not globally sorted);
        batches whose response cannot be parsed are skipped.
        """
        url = "https://open.bigmodel.cn/api/paas/v4/rerank"
        # SECURITY: the API key was hard-coded; prefer the environment and
        # keep the literal only as a backward-compatible fallback.
        headers = {
            "Authorization": os.environ.get(
                "BIGMODEL_API_KEY",
                "86e99e4867bb45e1bd7d2abd87549bf9.VJCZ4YqHYQz3a6wD",
            ),
            "User-Agent": "Apifox/1.0.0 (https://apifox.com)",
            "Content-Type": "application/json"
        }
        ranked_candidates = []
        # Truncate an over-long query once, up front.
        if len(query.split()) >= 3000:
            query = " ".join(query.split()[:3000])

        candidates_chunk = [candidates[i:i + 10] for i in range(0, len(candidates), 10)]

        def chunk_rerank(batch, query):
            # Truncate each candidate independently (same 3000-token cap).
            post_candidates = []
            for c in batch:
                words = c.split()
                if len(words) >= 3000:
                    # BUG FIX: the original appended the truncated *query*
                    # here instead of the truncated candidate.
                    post_candidates.append(" ".join(words[:3000]))
                else:
                    post_candidates.append(c)

            data = {
                "request_id": "1111111111",
                "query": query,
                "top_n": len(post_candidates),
                "documents": post_candidates,
                "return_documents": True,
                "return_raw_scores": True
            }

            response = requests.post(url, headers=headers, json=data)
            # Parse once. The original decoded the payload several times and
            # used a bare `except:` whose own `print(response.json())` could
            # raise again on a non-JSON body.
            try:
                return response.json()['results']
            except (ValueError, KeyError):
                # Best effort: dump diagnostics and skip this batch.
                print(len(query.split()))
                print("---------------")
                print(len(candidates))
                for g in candidates:
                    print(len(g.split()))
                print("---------------")
                for g in post_candidates:
                    print(len(g.split()))
                print(response.text)
                return []

        with ThreadPoolExecutor() as executor:
            results = executor.map(
                lambda batch: chunk_rerank(batch, query),
                candidates_chunk
            )
        for res in results:
            ranked_candidates.extend(res)
        return ranked_candidates

    def parser_text(self, text: str):
        """Decode a JSON response body into Python objects."""
        return json.loads(text)

    def get_chunks_from_query(self, query, num):
        """Return the top `num` chunk hits for a semantic search `query`."""
        params = {
            "query": query,
            "top_k": num,
        }
        url = self.base_url + 'search_papers'
        resp = requests.get(url=url, params=params)
        return self.parser_text(resp.text)

    def get_papers_from_query(self, query, num):
        """Return up to `num` papers whose title contains `query`."""
        params = {
            "title": query,
            "top_k": num,
        }
        url = self.base_url + 'query_by_title_contain'
        resp = requests.get(url=url, params=params)
        return self.parser_text(resp.text)

    def get_titles_from_citations(self, citations):
        """Return the single best-matching paper for a citation string."""
        params = {
            "query": citations,
            "top_k": 1,
        }
        url = self.base_url + 'search_papers'
        resp = requests.get(url=url, params=params)
        return self.parser_text(resp.text)

    def _register_chunk(self, merged_chunks, chunk, key, distance=None):
        """Convert chunk_text to markdown and fold `chunk` into `merged_chunks`.

        `key` is the (paper_id, chunk_id) dedupe key; `distance` is the
        optional vector-search distance of this hit. Shared by the three
        callers that previously duplicated this logic.
        """
        chunk_text = chunk['chunk_text'].replace("\n", "<br>")
        chunk['chunk_text'] = html2text(chunk_text, bodywidth=0)
        if key not in merged_chunks:
            merged_chunks[key] = {
                'count': 0,
                'distances': [],
                'relevance_score': [],
                'date': chunk['year'],
                'chunk_data': chunk  # keep the (mutated) hit for later use
            }
        merged_chunks[key]['count'] += 1
        if distance is not None:
            merged_chunks[key]['distances'].append(distance)

    def _attach_relevance(self, rerank_chunks, ranked_merged_chunks):
        """Greedily match rerank API results back onto merged chunks.

        Each rerank result is consumed by the first chunk whose text contains
        the returned document; its relevance score is appended to that chunk.
        An unmatched chunk triggers a debug dump (the original called exit()).
        """
        remaining = copy.deepcopy(rerank_chunks)
        for v in ranked_merged_chunks.values():
            matched = False
            for c in remaining:
                if c['document'] in v['chunk_data']['chunk_text']:
                    v['relevance_score'].append(c['relevance_score'])
                    remaining.remove(c)  # safe: we break right after
                    matched = True
                    break
            if not matched and remaining:
                # Robustness fix: the original wrote a stale (or unbound) `c`.
                with open(r'd.txt', mode='w', encoding='utf-8') as f:
                    f.write(str(remaining[-1]))
                with open(r'e.txt', mode='w', encoding='utf-8') as f:
                    f.write(str(ranked_merged_chunks))

    def get_chunks_from_queries(self, queries, num):
        """Search all queries, dedupe their chunks, score and rerank them.

        Returns a dict keyed by (paper_id, chunk_id), ordered by the combined
        score from rerank_ref, with per-query relevance scores appended
        afterwards.
        """
        merged_chunks = {}
        chunks_list = []
        # Fan the searches out over a thread pool (network-bound).
        with ThreadPoolExecutor() as executor:
            results = executor.map(
                lambda q: self.get_chunks_from_query(q, num),
                queries
            )
        for res in results:
            chunks_list.extend(res)
        for chunk in chunks_list:
            entity = chunk['entity']
            key = (entity['paper_id'], entity['chunk_id'])  # dedupe key
            self._register_chunk(merged_chunks, entity, key, distance=chunk['distance'])

        # Consistency fix: get_papers_from_queries guards the empty case;
        # rerank_ref would raise on an empty dict.
        if not merged_chunks:
            return merged_chunks

        # No relevance scores exist yet, so rerank_ref returns a dict here.
        ranked_merged_chunks = self.rerank_ref(merged_chunks)
        chunks_texts_list = [i["chunk_data"]["chunk_text"] for i in ranked_merged_chunks.values()]

        for q in queries:
            self._attach_relevance(self.rerank(q, chunks_texts_list), ranked_merged_chunks)
        return ranked_merged_chunks

    def process_query_without_distance(self, q, num, merged_chunks):
        """Fold the title-search hits for one query into a shared dict.

        Intended to run from worker threads: the shared `merged_chunks` is
        only mutated under the module-level lock.
        """
        chunks_list = self.get_papers_from_query(q, num)
        for chunk in chunks_list:
            key = (chunk['paper_id'], chunk['chunk_id'])  # dedupe key
            with lock:
                self._register_chunk(merged_chunks, chunk, key)

    def get_papers_from_queries(self, ori_query, queries, num):
        """Title-search all queries, dedupe, then rerank against `ori_query`."""
        merged_chunks = {}
        chunks_list = []
        with ThreadPoolExecutor() as executor:
            results = executor.map(
                lambda q: self.get_papers_from_query(q, num),
                queries
            )
        for res in results:
            chunks_list.extend(res)
        for chunk in chunks_list:
            key = (chunk['paper_id'], chunk['chunk_id'])  # dedupe key
            self._register_chunk(merged_chunks, chunk, key)

        if not merged_chunks:
            return merged_chunks

        ranked_merged_chunks = self.rerank_ref(merged_chunks)
        chunks_texts_list = [i["chunk_data"]["chunk_text"] for i in ranked_merged_chunks.values()]
        self._attach_relevance(self.rerank(ori_query, chunks_texts_list), ranked_merged_chunks)
        return ranked_merged_chunks

    def rerank_ref(self, merged_chunks):
        """Order merged chunks by a weighted combination of signals.

        Signals: occurrence count, publication year, mean vector distance and
        mean rerank relevance, each min-max normalized over the input.
        Entries are shuffled first so equal scores don't preserve insertion
        order. Returns a list of chunk_data dicts when relevance scores are
        present, otherwise an ordered {key: entry} dict.
        """
        shuffled_keys = random.sample(list(merged_chunks.keys()), len(merged_chunks))
        merged_chunks = {key: merged_chunks[key] for key in shuffled_keys}

        # Year 0 marks "unknown" and is excluded from the date bounds.
        valid_dates = [v['date'] for v in merged_chunks.values() if v['date'] != 0]
        # Robustness fix: every date may be 0; normalization then yields 0.5
        # instead of max()/min() raising on an empty sequence.
        max_date = max(valid_dates) if valid_dates else 0
        min_date = min(valid_dates) if valid_dates else 0

        all_count = [v['count'] for v in merged_chunks.values()]
        max_count = max(all_count)
        min_count = min(all_count)

        min_distance = max_distance = None
        if any(chunk['distances'] for chunk in merged_chunks.values()):
            all_distance_scores = [
                sum(data['distances']) / len(data['distances'])
                for data in merged_chunks.values() if data['distances']
            ]
            min_distance = min(all_distance_scores)
            max_distance = max(all_distance_scores)

        # BUG FIX: the original only inspected the *first* entry (unlike the
        # any() used for distances above) and filtered the averages on
        # `distances` instead of `relevance_score`.
        if any(chunk['relevance_score'] for chunk in merged_chunks.values()):
            all_relevance_scores = [
                sum(data['relevance_score']) / len(data['relevance_score'])
                for data in merged_chunks.values() if data['relevance_score']
            ]
            min_relevance = min(all_relevance_scores)
            max_relevance = max(all_relevance_scores)
        else:
            all_relevance_scores = None

        def min_max_normalize(value, min_val, max_val):
            # Equal bounds would divide by zero; return a neutral score.
            if max_val == min_val:
                return 0.5
            return (value - min_val) / (max_val - min_val)

        # Compute the weighted total score for every chunk.
        for data in merged_chunks.values():
            if all_relevance_scores:
                if data['relevance_score']:
                    relevance_avg = sum(data['relevance_score']) / len(data['relevance_score'])
                    normalized_relevance = min_max_normalize(relevance_avg, min_relevance, max_relevance)
                else:
                    # Robustness fix: a chunk without scores previously used
                    # an unbound or stale `normalized_relevance`.
                    normalized_relevance = 0.5

            count_score = min_max_normalize(data['count'], min_count, max_count)
            date_score = min_max_normalize(data['date'], min_date, max_date)

            if data['distances']:
                distance_avg = sum(data['distances']) / len(data['distances'])
                normalized_distance = min_max_normalize(distance_avg, min_distance, max_distance)
                # Weighted blend (weights tunable as needed).
                if all_relevance_scores:
                    total_score = (0.1 * normalized_distance + 0.1 * count_score
                                   + 0.1 * date_score + 0.7 * normalized_relevance)
                else:
                    total_score = 0.6 * normalized_distance + 0.2 * count_score + 0.2 * date_score
            elif all_relevance_scores:
                total_score = 0.1 * count_score + 0.1 * date_score + 0.8 * normalized_relevance
            else:
                total_score = 0.5 * count_score + 0.5 * date_score

            data['total_score'] = total_score

        # Sort (key, entry) pairs together: the original recovered keys via
        # O(n^2) value equality, which is slow and ambiguous on duplicates.
        sorted_items = sorted(merged_chunks.items(),
                              key=lambda kv: kv[1]['total_score'], reverse=True)

        if all_relevance_scores:
            return [entry['chunk_data'] for _, entry in sorted_items]
        return dict(sorted_items)

    def merge_references(self, dict1, dict2):
        """Merge two merged-chunk dicts key-wise.

        Counts add, relevance/distance lists concatenate; `date` and
        `chunk_data` take dict2's value when a key exists in both (matching
        the original overwrite order).
        """
        merged = {}
        for key in dict1.keys() | dict2.keys():
            entry = {
                'count': 0,
                'distances': [],
                'relevance_score': [],
                'date': None,
                'chunk_data': None
            }
            for source in (dict1, dict2):
                if key in source:
                    entry['count'] += source[key].get('count', 0)
                    entry['relevance_score'].extend(source[key].get('relevance_score', []))
                    entry['distances'].extend(source[key].get('distances', []))
                    # dict2 is visited last, so it wins for date/chunk_data.
                    entry['date'] = source[key].get('date')
                    entry['chunk_data'] = source[key].get('chunk_data')
            merged[key] = entry
        return merged

    def get_title(self, papers):
        """Extract paper titles from a list of search hits."""
        return [i['entity']['paper_title'] for i in papers]

    def parse_expression(self, expr_str):
        """Expand a boolean keyword expression into its DNF conjunctions.

        AND/OR (any case) are mapped to sympy's &/| operators and the
        expression is converted to disjunctive normal form; each conjunction
        is returned as one space-joined string of its sorted variables.

        Raises:
            ValueError: if sympy cannot parse the expression.
        """
        expr_str = re.sub(r'\bAND\b', '&', expr_str, flags=re.IGNORECASE)
        expr_str = re.sub(r'\bOR\b', '|', expr_str, flags=re.IGNORECASE)

        try:
            expr = sympify(expr_str, evaluate=False)
        except Exception as e:
            raise ValueError("无效的布尔表达式") from e

        dnf_expr = to_dnf(expr, simplify=True)

        # A top-level Or holds several conjunctions; otherwise a single term.
        terms = dnf_expr.args if dnf_expr.func == Or else [dnf_expr]

        variables_list = []
        for term in terms:
            if term.func == And:
                vars_in_term = sorted([str(arg) for arg in term.args], key=lambda x: x.upper())
            else:
                vars_in_term = [str(term)]
            variables_list.append(' '.join(vars_in_term))

        return variables_list
    
    
if __name__ == "__main__":
    # Smoke test: expand a sample boolean query into its DNF term strings.
    db = database()
    dnf_terms = db.parse_expression("ALCD AND (DLS OR (ANFF AND DFLJ)) AND DLSJF")
    print(dnf_terms)
