from jieba import cut_for_search  
import math  
import re  
import pandas as pd  
import time  
from Search.getdata import word_s, word_s_title, TF, IDF, TF_Title, IDF_Title, information, page_rank  
from datetime import datetime,timedelta


def calculate_tf(word_list, document_content):
    """Compute log-dampened term frequencies of *word_list* terms.

    Every term in word_list gets an entry (0.0 when absent from the
    document); raw counts are dampened with log10(count + 1) so that a
    zero count maps to 0.0.

    Parameters:
    word_list: iterable of vocabulary terms to count.
    document_content: iterable of tokens to scan.

    Returns:
    dict mapping each term to its log-scaled frequency.
    """
    term_frequency = dict.fromkeys(word_list, 0)
    # Hoist membership testing into a set: `in word_list` on a list is
    # O(len(word_list)) per token, which dominates for large vocabularies.
    vocabulary = set(word_list)
    for word in document_content:
        if word in vocabulary:
            term_frequency[word] += 1
    return {word: math.log10(count + 1) for word, count in term_frequency.items()}

def calculate_tfidf(tf_dict, idf_dict):
    """Combine TF and IDF weights into TF-IDF scores.

    Terms missing from idf_dict contribute a score of 0.
    """
    return {term: weight * idf_dict.get(term, 0) for term, weight in tf_dict.items()}

def calculate_vector_length(vector):
    """Euclidean norm of a list of (term, weight) pairs, rounded to 2 dp.

    Parameters:
    vector: list of (term, weight) tuples.

    Returns:
    0 for an empty vector, otherwise the rounded float norm.
    """
    if not vector:
        return 0
    # Sum the squared weights directly instead of indexing by position.
    squared_sum = sum(weight ** 2 for _, weight in vector)
    return round(math.sqrt(squared_sum), 2)

def base_search(query: str, search_titles_only: bool = False, top_n: int = 100, history: list = None):
    """Rank indexed documents against *query* by TF-IDF cosine similarity.

    Args:
        query: raw user query. If it contains regex metacharacters, the
            final filtering step treats the raw query as a regex
            ("wildcard" mode).
        search_titles_only: use the title-only TF/IDF index instead of the
            full-document one.
        top_n: keep only the top_n highest-weighted query terms.
        history: browsing history; when truthy, triggers the
            "personalized" re-ranking branch below (see NOTE there).

    Returns:
        List of (doc_id, similarity) tuples, filtered so the raw query
        actually occurs in the document's title/description (or anywhere,
        in wildcard mode), ordered by descending score.

    Raises:
        KeyError: when no usable keyword remains after tokenization.
    """
    regex = r'[\.\^\$\*\+\?\{\}\[\]\|\(\)]'
    original_query = query
    is_regex = re.search(regex, query)
    if is_regex is not None:
        # Strip metacharacters so jieba tokenizes plain text; the raw
        # query is kept for the wildcard filter at the end.
        query = re.sub(regex, '', query)
    tokenized_query = sorted(list(cut_for_search(query)))
    tokenized_query = [term for term in tokenized_query if term not in ["", " "]]

    # Select the prebuilt index: title-only or full-document.
    if search_titles_only:
        tf_dict = TF_Title
        idf_dict = IDF_Title
        word_list = word_s_title
    else:
        tf_dict = TF
        idf_dict = IDF
        word_list = word_s

    # TF-IDF vector for every indexed document.
    # NOTE(review): recomputed on every call even though TF/IDF are
    # module-level data — could be precomputed once per index.
    tfidf_scores_dict = {}
    for doc_id, tf_values in tf_dict.items():
        tfidf_scores_dict[doc_id] = calculate_tfidf(tf_values, idf_dict)

    # TF-IDF vector for the query, truncated to its top_n heaviest terms.
    query_tf = calculate_tf(word_list, tokenized_query)
    query_tfidf = calculate_tfidf(query_tf, idf_dict)
    query_vector = sorted(query_tfidf.items(), key=lambda item: item[1], reverse=True)[:top_n]
    query_vector_length = calculate_vector_length(query_vector)

    if query_vector_length == 0:
        # None of the query tokens exist in the index vocabulary.
        raise KeyError("No valid keywords found.")

    # Cosine similarity between the query vector and each document vector.
    search_results = []
    for doc_id in tfidf_scores_dict:
        temp_vector = tfidf_scores_dict[doc_id]
        similarity_score = 0
        for term, score in query_vector:
            if score != 0:
                # NOTE(review): linear scan over the document's dict;
                # temp_vector.get(term, 0) would be the O(1) equivalent.
                for word, value in temp_vector.items():
                    if term == word:
                        similarity_score += score * value

        # NOTE(review): a document whose vector has zero length would raise
        # ZeroDivisionError here — confirm the index guarantees non-zero vectors.
        similarity = round(similarity_score / (query_vector_length * calculate_vector_length(list(temp_vector.items()))), 4)
        if similarity > 0:
            search_results.append((doc_id, similarity))

    search_results = sorted(search_results, key=lambda item: item[1], reverse=True)

    # Personalized re-ranking based on browsing history.
    # NOTE(review): `history` contents are never read, and the
    # history_similarity below is computed with exactly the same formula as
    # `similarity` above, so the 0.8/0.2 blend is a no-op up to rounding —
    # confirm the intended personalization logic.
    if history:
        history_results_dict = {}
        for doc_id, similarity in search_results:
            length = 0
            temp_vector = tfidf_scores_dict[doc_id]
            for term, score in query_vector:
                if score != 0:
                    for word, value in temp_vector.items():
                        if term == word:
                            length += score * value

            history_similarity = round(length / (query_vector_length * calculate_vector_length(list(temp_vector.items()))), 4)
            history_results_dict[doc_id] = round(0.8 * similarity + 0.2 * history_similarity, 4)

        # Re-sort by the blended personalized score.
        search_results = sorted(history_results_dict.items(), key=lambda item: item[1], reverse=True)

    # Keep only hits where the raw query literally occurs in the document:
    # title/description in plain mode, or title/description/content treated
    # as a regex pattern in wildcard mode.
    filtered_results = []
    for result in search_results:
        doc_id = result[0]
        doc_info = information.loc[doc_id]

        title_match = re.search(original_query, str(doc_info.title)) is not None
        description_match = re.search(original_query, str(doc_info.description)) is not None
        matches_found = title_match or description_match

        if is_regex is not None:
            wildcard_match = (
                re.search(original_query, str(doc_info.title)) is not None or
                re.search(original_query, str(doc_info.description)) is not None or
                re.search(original_query, str(doc_info.content)) is not None
            )
            if wildcard_match:
                filtered_results.append((result[0], result[1]))
        elif matches_found:
            filtered_results.append((result[0], result[1]))

    return filtered_results

def test_base_search(query: str):
    """Run a title-only search for *query*, printing hits and timing."""
    started = time.time()
    hits = base_search(query, True)
    elapsed = time.time() - started
    print("Search results:")
    for hit in hits:
        print(hit)
    print(f"Response time: {elapsed:.2f} seconds, returned {len(hits)} results.")

def expand_search_results(search_results: list):
    """Enrich (doc_id, similarity) hits for display.

    Looks up title and description in the document store, blends the
    similarity with PageRank (70/30), and returns
    (title, doc_id, description, score) tuples sorted by descending score.
    """
    enriched = []
    for doc_id, similarity in search_results:
        record = information.loc[doc_id].fillna('')
        display_title = str(record['title']).replace("_", "/")
        summary = str(record['description'])

        blended_score = similarity * 0.7 + 0.3 * page_rank.loc[doc_id]['page_rank']
        enriched.append((display_title, doc_id, summary, blended_score))

    return sorted(enriched, key=lambda entry: entry[-1], reverse=True)

def test_expand_search_results(query: str, file_df):
    """Print expanded results of a title-only search.

    `file_df` is unused but kept so the call signature stays unchanged.
    """
    expanded = expand_search_results(base_search(query, True))
    for entry in expanded:
        print(entry)

# Complete match / partial match against a document's text fields.
def phrase_match(result, input, complete=True):
    """Check whether the query words occur in the document's text.

    Parameters:
    result: expanded result tuple; index 1 holds the document id.
    input: space-separated query words; '#' tokens act as separators.
    complete: True -> every word must occur; False -> any word suffices.

    Returns:
    bool per the `complete` semantics above.
    """
    row = information.loc[result[1]]
    text = f"{row['title']}#{row['description']}#{row['content']}#{row['editor']}"
    # BUG FIX: the original `if word == '#': pass` never skipped anything,
    # and `text` is joined with '#', so a literal '#' token (and likewise
    # an empty token) always counted as a match. Filter both out, matching
    # the filtering already done in `not_include`.
    words = [w for w in str(input).split(" ") if w not in ("", "#")]
    if complete:
        return all(w in text for w in words)
    return any(w in text for w in words)

# Exclusion filter: document must contain none of the given words.
def not_include(result, input):
    """Return True when none of the query words occur in the document.

    Parameters:
    result: expanded result tuple; index 1 holds the document id.
    input: space-separated words that must be absent.
    """
    row = information.loc[result[1]]
    text = f"{row['title']}#{row['description']}#{row['content']}#{row['editor']}"
    # BUG FIX: the original `if word == '#': pass` never skipped the token,
    # and since `text` is '#'-joined, a literal '#' token always forced a
    # False result. Skip it together with empty tokens.
    words = [w for w in str(input).split(" ") if w not in ("", "#")]
    return not any(w in text for w in words)

def load_file_links_from_csv(file_path):
    """Read the file-links CSV at *file_path*.

    Parameters:
    file_path (str): path to the CSV file.

    Returns:
    DataFrame: the parsed file-link table.
    """
    return pd.read_csv(file_path)

def search_file_link(file_df, query):
    """Find the first file link whose title contains *query*.

    Parameters:
    file_df (DataFrame): table of file links with at least a 'title' column.
    query (str): literal text to look for (case-insensitive).

    Returns:
    dict: the first matching row as a dict, or None when nothing matches.
    """
    # Drop rows with missing titles so str.contains doesn't propagate NaN.
    file_df = file_df.dropna(subset=['title'])
    # BUG FIX: queries are literal filenames (e.g. "....docx"), but
    # str.contains defaulted to regex mode, so '.' matched any character
    # and unbalanced metacharacters would raise. regex=False forces a
    # plain substring search.
    filtered_files = file_df[file_df['title'].str.contains(query, case=False, regex=False)]
    if not filtered_files.empty:
        return filtered_files.iloc[0].to_dict()
    return None

def test_file_link_search(query, file_df):
    """Look up *query* among the file links and print the outcome."""
    match = search_file_link(file_df, query)
    if match:
        print(f"文件链接: {match['url']} - {match['title']}")
    else:
        print("未找到匹配的文件链接。")

def satisfy_time(result, limit):
    """Return True when the document's age satisfies the recency *limit*.

    Parameters:
    result: expanded result tuple; index 1 holds the document id.
    limit: one of the Chinese recency labels below; any other value
        imposes no age restriction.

    Returns:
    bool — False for documents with a missing timestamp.
    """
    row = information.loc[result[1]]

    # Missing timestamps are stored as NaN; such documents never qualify.
    # (The original checked this only AFTER using the value — guard first.)
    if str(row['date_timestamp']) == "nan":
        return False

    article_time = datetime.fromtimestamp(int(row['date_timestamp']))
    age = datetime.now() - article_time

    # Map recency labels ("within a month / a year / five years") to
    # maximum ages; unknown labels impose no restriction.
    max_ages = {
        "一个月内": timedelta(days=30),
        "一年内": timedelta(days=365),
        "五年内": timedelta(days=365 * 5),
    }
    max_age = max_ages.get(limit)
    if max_age is not None and age > max_age:
        return False
    return True

def satisfy_time_test(input, limit):
    """Print expanded search results before and after the time filter."""
    # BUG FIX: the original called base_search(input, []) — the `[]` landed
    # in `search_titles_only`, not `history`. Both are falsy, so observable
    # behavior is unchanged, but the argument now goes where it was meant.
    results = base_search(input, history=[])
    expanded = expand_search_results(results)
    print("时间限制添加前共有" + str(len(expanded)) + "条结果：")
    for item in expanded:
        print(item)
    expanded = [item for item in expanded if satisfy_time(item, limit)]
    print("时间限制添加后共有" + str(len(expanded)) + "条结果：")
    for item in expanded:
        print(item)


def satisfy_website(result, name):
    """True when *name* occurs in the result's document id (its URL)."""
    return name in result[1]

def satisfy_website_test(input, name):
    """Print expanded search results before and after the site filter."""
    # BUG FIX: the original called base_search(input, []) — the `[]` landed
    # in `search_titles_only`, not `history`. Both are falsy, so observable
    # behavior is unchanged, but the argument now goes where it was meant.
    results = base_search(input, history=[])
    expanded = expand_search_results(results)
    print(f"网站或域名限制前共有{len(expanded)}条结果：")
    for item in expanded:
        print(item)
    expanded = [item for item in expanded if satisfy_website(item, name)]
    print(f"网站或域名限制后共有{len(expanded)}条结果：")
    for item in expanded:
        print(item)

output_csv_file = './data/file_links.csv'  # path to the crawled file-links CSV
# NOTE: loaded at import time so other modules can reuse `file_df`.
# This is a module-level side effect (reads disk on import) — kept for
# backward compatibility with importers.
file_df = load_file_links_from_csv(output_csv_file)

if __name__ == "__main__":
    # `file_df` is already loaded at module level above; the original
    # redundantly reloaded the same CSV here.
    test_query = "附件南开大学百年校庆LOGO、主题歌、标语口号、吉祥物征集作品报名表.docx"  # sample query
    test_file_link_search(test_query, file_df)