import os
import pandas as pd
import math
import matplotlib.pyplot as plt
import numpy as np

from pylab import mpl
plt.rcParams['font.sans-serif'] = 'SimHei'
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus']=False
import networkx as nx
import torch as T
from sentence_transformers import util
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from rank_bm25 import BM25Okapi
from utlis import path_handle, use_llm_api, tokenize2stw_remove



def average_content_vecs_by_path(group, mat):
    """Mean-pool the rows of *mat* addressed by *group*'s index.

    Intended for use with DataFrame.groupby: each group's row indices pick
    the matching embedding vectors out of *mat*, which are then averaged.
    """
    row_ids = list(group.index)
    return np.mean(mat[row_ids], axis=0)


def add_files_to_tree(tree, root_path, record_level, current_level=1):
    """Recursively attach files found on disk to a nested directory tree.

    For every key of *tree* that is a directory under *root_path*, the files
    inside it are added as ``{filename: {}}`` entries — but only once the
    recursion depth reaches *record_level*. Sub-directories are always
    descended into, so deeper levels still get their files recorded.
    """
    for name, children in tree.items():
        node_path = os.path.join(root_path, name)
        if not os.path.isdir(node_path):
            continue
        if current_level >= record_level:
            for entry in os.listdir(node_path):
                if os.path.isfile(os.path.join(node_path, entry)):
                    children[entry] = {}
        # Recurse regardless of whether files were recorded at this level.
        add_files_to_tree(children, node_path, record_level, current_level + 1)
            

def bfs_level_searching(inten_lst, level_nodes, TOP_K, tokenizer, model):
    """Pick the TOP_K keys of one tree level most similar to the intentions.

    Parameters
    ----------
    inten_lst : list[str]
        Intention phrases; joined with ',' before vectorization.
    level_nodes : dict
        Maps node key -> node dict carrying an 'embedding' vector.
    TOP_K : int
        Number of nodes to keep.
    tokenizer, model :
        Passed through to vectorize_texts().

    Returns
    -------
    list
        De-duplicated keys of the most similar nodes (order not guaranteed,
        as in the original set()-based de-duplication).
    """
    level_vecs = np.array([v['embedding'] for v in level_nodes.values()])
    level_keys = list(level_nodes.keys())

    _, q_vector = vectorize_texts(','.join(inten_lst), tokenizer, model)
    # Fix: force hybrid=False. This call supplies no msg/stopwords/token_corpus,
    # and find_closest's hybrid branch would crash on BM25Okapi(None); pure
    # semantic similarity is what this level search needs anyway.
    sim_contents, _, _, _ = find_closest(level_keys, level_vecs, q_vector, TOP_K, hybrid=False)

    return list(set(sim_contents))


def bfs_filtering(inten_lst, tree, TOP_K, tokenizer, model):
    """Recursively prune *tree*, keeping only the nodes whose embeddings
    best match the intention list at each level.

    Only entries that are dicts carrying a 'sub_tree' key are candidates;
    selected nodes have their own sub_tree filtered in turn.
    """
    candidates = {k: v for k, v in tree.items()
                  if isinstance(v, dict) and 'sub_tree' in v}
    selected = bfs_level_searching(inten_lst, candidates, TOP_K, tokenizer, model)
    print('bfs selected nodes: ', selected)

    pruned = {}
    for name in selected:
        node = tree[name]
        sub = node['sub_tree']
        if isinstance(sub, dict) and sub != {}:
            node['sub_tree'] = bfs_filtering(inten_lst, sub, TOP_K, tokenizer, model)
        pruned[name] = node
    return pruned


def bfs_reverse(tree):
    """Collapse node payloads, keeping only the nested 'sub_tree' structure.

    Each value in *tree* is expected to carry a 'sub_tree' entry; the result
    maps every key directly to its (recursively collapsed) sub_tree.
    """
    flipped = {}
    for name, payload in tree.items():
        if isinstance(payload.get('sub_tree'), dict):
            payload['sub_tree'] = bfs_reverse(payload['sub_tree'])
        flipped[name] = payload['sub_tree']
    return flipped


def detect_file_type(path_):
    """Classify a path by its file extension.

    Returns the matching category name ('table' for spreadsheet-like files)
    or an empty string when no category matches.
    """
    extension_groups = {
        'table': ['.csv', '.xls', '.xlsx'],
        # add other type and lists
    }

    ext = os.path.splitext(path_)[1].lower()
    for group_name, extensions in extension_groups.items():
        if ext in extensions:
            return group_name
    return ''


def find_closest(texts, text_vectors, q_vec, topk, USER_SETTINGS=None, msg=None, add_identifiers=None, hybrid=True, stopwords=None, token_corpus=None, threshold=0, rerank=None):
    """Rank *texts* against the query vector and return the top-k hits.

    Scoring is cosine similarity of `q_vec` vs `text_vectors`, optionally
    blended with BM25 keyword scores (hybrid mode) and optionally reranked.

    Parameters:
        texts: candidate strings (paths or textual contents), aligned 1:1
            with the rows of `text_vectors`.
        text_vectors: embedding matrix for `texts`.
        q_vec: query embedding.
        topk: maximum number of results to return.
        USER_SETTINGS: settings dict; only `['RERANKER_PATH']` is read here,
            and only when `rerank` is set.
        msg: raw query text for BM25 keyword scoring — required when
            hybrid=True (it is tokenized below).
        add_identifiers: optional list aligned with `texts` (e.g. paths);
            reordered to match the returned hits.
        hybrid: blend BM25 keyword scores into the semantic scores.
        stopwords: passed to tokenize2stw_remove for query tokenization.
        token_corpus: pre-tokenized corpus for BM25 — presumably aligned
            1:1 with `texts`; required when hybrid=True. TODO confirm.
        threshold: minimum hybrid score; <=0 disables the cutoff.
        rerank: reranker mode string forwarded to rerank_(), or None.

    Returns:
        (sim_contents, sim_ids, hybrid_scores, add_identifiers) — all
        truncated to at most `topk` entries.
    """
    def find_cutoff(sorted_list, threshold):
        # Walk the descending score list; stop at the first score <= threshold.
        # threshold<=0 keeps everything. Always keeps at least one element.
        if threshold<=0:
            return sorted_list[-1], len(sorted_list)
        else:
            for i, number in enumerate(sorted_list):
                if number <= threshold:
                    return (sorted_list[i-1], i) if i > 0 else (sorted_list[0], 1)  # Return element and position
            return sorted_list[-1], len(sorted_list)

    # Semantic score: cosine similarity of the query against every candidate.
    semantic_scores = util.cos_sim(q_vec, text_vectors)
    semantic_scores = list(semantic_scores.detach().numpy().reshape(-1))

    if hybrid==True:
        # Tokenize the query for BM25; tokens are '->'-joined by the helper.
        msg_tokens = tokenize2stw_remove([msg], stopwords)
        msg_tokens = msg_tokens[-1].split('->')
        bm25 = BM25Okapi(token_corpus)
        kw_scores = bm25.get_scores(msg_tokens)
    else:
        kw_scores = np.zeros((len(texts)), dtype=np.float32)

    # log(1+s) dampens BM25 magnitudes before adding to cosine scores.
    kw_scores = [math.log(1 + s) for s in kw_scores]
    hybrid_scores = np.array(semantic_scores) + kw_scores

    # Sort candidates by blended score, descending, then apply threshold cutoff.
    sim_ids = list(np.argsort(hybrid_scores)[::-1])
    hybrid_scores = list(np.sort(hybrid_scores)[::-1])
    cut_score, cut_idx = find_cutoff(hybrid_scores, threshold)

    sim_ids = sim_ids[:cut_idx][:topk]
    hybrid_scores = hybrid_scores[:cut_idx][:topk]
    sim_contents = [texts[i] for i in sim_ids][:topk] # here note the sim_contents can be paths or textual contents

    # implement reranking
    if not rerank==None:
        # Re-order the shortlist with the external reranker, then realign
        # ids/scores to the new content order via the contents index.
        # NOTE(review): assumes USER_SETTINGS is not None here — confirm callers.
        searching_df = pd.DataFrame({'sim_id':sim_ids, 'score':hybrid_scores, 'contents':sim_contents})
        rerank_res = rerank_(sim_contents, msg=msg, topk=topk, ranker_path=USER_SETTINGS['RERANKER_PATH'], mode=rerank)
        sim_contents = rerank_res.values.reshape(-1).tolist()
        searching_df = searching_df.set_index('contents').loc[sim_contents].reset_index()

        hybrid_scores = list(searching_df['score'])
        sim_ids = list(searching_df['sim_id'])
        sim_contents = list(searching_df['contents'])

    if not add_identifiers==None:
        # Reorder the caller-supplied identifiers to match the returned hits.
        add_identifiers = [add_identifiers[i] for i in sim_ids][:topk]
    return sim_contents, sim_ids, hybrid_scores, add_identifiers

  
def find_by_content_voting(msg, q_vector, all_contents_df, all_vec, topk, USER_SETTINGS, max_candidates=200, mode='normal', hybrid=True, stopwords=None, token_corpus=None, rerank=None):
    """Retrieve candidate contents, then rank their parent paths by voting.

    Up to *max_candidates* contents are fetched with find_closest(); hits are
    grouped by 'path' and each group votes with its similarity scores relative
    to the mean similarity. The *topk* highest-voted paths are returned.
    """
    contents = all_contents_df['content'].tolist()
    paths = all_contents_df['path'].tolist()

    sim_contents, sim_ids, sims, sim_paths = find_closest(
        contents, all_vec, q_vector, max_candidates, USER_SETTINGS,
        msg=msg, add_identifiers=paths, hybrid=hybrid,
        stopwords=stopwords, token_corpus=token_corpus, rerank=rerank)

    hits = pd.DataFrame({'content': sim_contents, 'sim_id': sim_ids,
                         'similarity': sims, 'path': sim_paths})

    # Each path's vote aggregates its hits' similarities vs. the global mean.
    baseline = np.mean(sims)
    path_votes = {path: voting(sub_df, baseline, mode)
                  for path, sub_df in hits.groupby('path')}

    ranked_paths = sorted(path_votes, key=path_votes.get, reverse=True)
    return ranked_paths[:topk]

        
def local_graph(root_node, nodes, show_root=False, **kwargs):
    """Draw *nodes* as an ordered chain, optionally fanned out from
    *root_node*, and display the figure with matplotlib.

    Parameters
    ----------
    root_node : str
        Label of the root; only drawn when show_root=True.
    nodes : list[str]
        Node labels; each is prefixed with its 1-based rank and truncated
        at the first '.'.
    show_root : bool
        Also draw root->node edges labeled kwargs['edge_name'].
    kwargs :
        'link_name' (required) labels the chain edges;
        'edge_name' (required when show_root=True) labels the tree edges.

    Returns
    -------
    networkx.DiGraph
        The graph that was drawn.
    """
    G = nx.DiGraph()
    edge_labels = {}
    # Fix: enumerate replaces the original nodes.index() lookup, which was
    # O(n^2) and gave duplicate node names the index of the first occurrence.
    nodes = [f"{i + 1} {node.split('.')[0]}" for i, node in enumerate(nodes)]
    if show_root:
        G.add_node(root_node)
        edge_name = kwargs['edge_name']

    # Tree edges: root -> every node (only when the root is shown).
    for node in nodes:
        G.add_node(node)
        if show_root:
            G.add_edge(root_node, node)
            edge_labels[(root_node, node)] = edge_name

    # Chain edges: consecutive nodes linked in list order.
    link_name = kwargs['link_name']
    for a, b in zip(nodes, nodes[1:]):
        G.add_edge(a, b)
        edge_labels[(a, b)] = link_name

    # The root (first inserted node) is drawn larger than the rest.
    if show_root:
        sizes = [1200] + [800] * len(nodes)
    else:
        sizes = [800] * len(nodes)

    colors = plt.cm.Pastel2(np.linspace(0, 1, G.number_of_nodes()))
    pos = nx.spring_layout(G)

    nx.draw(G, pos, with_labels=True, node_color=colors, node_size=sizes)
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)

    plt.show()
    return G
    

def merge_cloest_soft(*lists, lst_weights=None, nonexist_panelty=False):
    """Merge several ranked lists into one via weighted Borda-style voting.

    Each element earns (len(lst) - position) * list_weight from every list
    it appears in; elements are returned sorted by total weight, descending
    (ties keep first-seen order).

    Parameters
    ----------
    *lists : list
        Ranked candidate lists, best first.
    lst_weights : list[float] | None
        Per-list weights. Fix: defaults to uniform weights of 1 — the old
        default of None crashed at `lst_weights[i]`.
    nonexist_panelty : bool
        Assign -1 to elements absent from the accumulated weights.
    """
    if lst_weights is None:
        lst_weights = [1] * len(lists)

    final_weights = {}
    for li, lst in enumerate(lists):
        for rank, elem in enumerate(lst):
            final_weights[elem] = final_weights.get(elem, 0) + (len(lst) - rank) * lst_weights[li]

    if nonexist_panelty:
        # NOTE(review): every element of *lists is already in final_weights,
        # so this branch can never add anything — presumably the intent was
        # to penalize elements missing from *some* list; confirm and revise.
        for elem in set().union(*lists):
            if elem not in final_weights:
                final_weights[elem] = -1

    # sort results by accumulated weight, best first
    return sorted(final_weights, key=final_weights.get, reverse=True)
    

def rerank_(*lsts, msg, topk, ranker_path, llm_name=None, mode='m3'):
    """Re-rank candidate lists against *msg* and return the top-k rows.

    The first list in *lsts* drives the ordering; any additional lists are
    carried along as extra DataFrame columns and reordered with it.

    Parameters:
        *lsts: one or more parallel candidate lists; lsts[0] is reranked.
        msg: the query text the candidates are scored against.
        topk: number of rows kept in the returned DataFrame.
        ranker_path: HF model path for the cross-encoder ('m3' mode).
        llm_name: LLM identifier for 'llm' mode (currently broken, see NOTE).
        mode: 'm3' (cross-encoder scoring) or 'llm'.

    Returns:
        pandas.DataFrame with columns 1..len(lsts), reordered by rank and
        truncated to *topk* rows.
    """
    # Columns are keyed 1..N so column 1 is always the list being reranked.
    rerank_df = pd.DataFrame({i+1: lst for i, lst in enumerate(lsts)})
    
    if mode=='m3':
        # Build (query, candidate) pairs for the cross-encoder.
        rr_pairs = []
        for pth in rerank_df[1].tolist():
            rr_pairs.append([msg, pth])
        
        # NOTE(review): the tokenizer/model are reloaded on every call —
        # consider caching them at module level if this is on a hot path.
        rr_tokenizer = AutoTokenizer.from_pretrained(ranker_path)
        rr_model = AutoModelForSequenceClassification.from_pretrained(ranker_path)
        rr_model.eval()

        with T.no_grad():
            inputs = rr_tokenizer(rr_pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
            scores = rr_model(**inputs, return_dict=True).logits.view(-1, ).float().cpu().numpy()
        
        # Highest cross-encoder score first; keep only the candidate strings.
        sorted_combined = sorted(list(zip(rr_pairs, scores)), key=lambda x: x[1], reverse=True)
        sorted_lst = [item[0][1] for item in sorted_combined]
    
    elif mode=='llm' and not llm_name==None:
        # NOTE(review): this branch is broken as written — `paths4ranks`,
        # `model_config` and `USER_SETTINGS` are undefined here, and it never
        # assigns `sorted_lst`, so the code below raises NameError. Needs the
        # missing inputs wired through before 'llm' mode can be used.
        llm_pairs = []
        for i, pth_val in enumerate(paths4ranks.values()):
            llm_pairs.append((f"【{i}】"+pth_val))
        
        rerank_dic, llm_histories = use_llm_api(llm_name,
                                        histories=[],
                                        paras={ 'task':'rerank', 
                                                'query': msg, 
                                                'texts': llm_pairs,
                                                'length' : len(llm_pairs)},
                                        config=model_config,
                                        settings=USER_SETTINGS,
                                        record_his=False)
        
    # Map each candidate to its new rank and reorder all columns together.
    order_mapping = {v:i for i,v in enumerate(sorted_lst)}
    rerank_df['order'] = rerank_df[1].map(order_mapping)
    rerank_df = rerank_df.sort_values(by='order').reset_index(drop=True).drop(columns=['order'])
    return rerank_df.head(topk)


def voting(df, avg_threshold, mode='normal'):
    """Aggregate a group's similarity scores into a single vote.

    Each row votes with its similarity minus *avg_threshold*; the votes are
    transformed according to *mode* and summed.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'similarity' column.
    avg_threshold : float
        Baseline subtracted from every similarity.
    mode : str
        'normal' — raw differences;
        'relu'   — negative differences clipped to 0;
        'exp'    — positive differences boosted with exp(), negatives kept.

    Returns
    -------
    numpy scalar
        The summed vote.

    Raises
    ------
    ValueError
        For an unknown *mode* (fix: previously fell through `else: pass`
        and crashed with NameError on the undefined `votes`).
    """
    similarities = np.array(df['similarity'].values)
    differences = similarities - avg_threshold

    if mode == 'normal':
        votes = differences
    elif mode == 'relu':
        votes = np.maximum(0, differences)
    elif mode == 'exp':
        votes = np.where(differences > 0, np.exp(differences), differences)
    else:
        raise ValueError(f"unknown voting mode: {mode!r}")

    return np.sum(votes)
                 
                                
                                            
    

    
    
    

    



    
    
    
    
    
    
    
    
    
    
    



