import re
import string
import os
import time
import jieba
import hashlib
import sys



class MyNote:
    """In-memory index over a directory of markdown notes.

    Loads every ``.md`` file under ``note_base_path`` (via ``cache_notes``)
    and answers queries through ``search``, which runs an accurate
    substring/keyword pass first and falls back to a fuzzy
    LCS-based pass when the accurate pass is unconvincing.
    """

    def __init__(self, note_base_path: str):
        """Remember the note root; call cache_notes() before searching."""
        self.note_base_path = note_base_path
        self.notes = None  # {md5(title): note dict}, populated by cache_notes()
        # Initialize global parameters
        self.MAX_RESULT_NUM = 20  # cap on the number of results returned

    # =======================================================================
    # File-reading helpers
    # Collect all files with the given extension, sorted by last-modified
    # time, newest first.
    def get_all_files(self, path: str, extension: str):
        """Return [(path, mtime), ...] for files whose extension matches, newest first."""
        file_paths = []
        for root, dirs, files in os.walk(path):
            for file in files:
                if file.split('.')[-1].lower() == extension:
                    file_path = os.path.join(root, file).replace("\\", "/")
                    file_paths.append((file_path, os.path.getmtime(file_path)))
        file_paths.sort(key=lambda x: x[1], reverse=True)  # sort by last-modified time
        return file_paths

    def read_file(self, file_path: str) -> str:
        """Return the entire UTF-8 text content of file_path."""
        with open(file_path, 'r', encoding='utf-8') as file:
            content = file.read()
        return content
    
    def is_in_hidden_path(self, filepath: str) -> bool:
        """True if any directory component of filepath is hidden (leading '.')."""
        dirnames = os.path.dirname(filepath).split("/")
        for dirname in dirnames:
            dirname = dirname.strip().rstrip(".")
            if len(dirname) > 1 and dirname[0] == ".":
                return True
        return False

    def get_all_notes(self, note_base_path: str) -> dict:
        """Walk note_base_path and build {md5(title): note dict} for every .md file."""
        notes = {}
        files = self.get_all_files(note_base_path, 'md')
        for file in files:
            file_path = file[0]
            if self.is_in_hidden_path(file_path):
                continue
            
            content = self.read_file(file_path)
            file_name = os.path.basename(file_path)  # file name including extension
            title = os.path.splitext(file_name)[0]  # drop the extension
            hash_id = self.string_to_md5(title)
            # NOTE(review): paras is computed but never used below; the
            # "paragraphs" field keeps the raw unfiltered lines instead.
            paras = [para.strip() for para in content.split('\n')]
            paras = [para for para in paras if len(para)>1] # lines that are too short are filtered out of searching
            note = {
                "id": hash_id,
                "title": title,
                "last_update_time": round(file[1]),
                "web_path": file_path.replace("\\", "/").split(note_base_path)[-1].strip("/"),
                "note_path": file_path,
                "content": content,
                "paragraphs": content.split('\n') #content.replace('。', '\n').replace('. ', '\n').split('\n')
            }
            notes[hash_id] = note
        return notes
    
    # Cache all note data in memory
    def cache_notes(self):
        """Load every note under self.note_base_path into self.notes."""
        self.notes = self.get_all_notes(self.note_base_path)

    # ===========================================================================
    # Search algorithm
    # Approach:
    # First read all note paragraphs.
    # Run exact matching first; if nothing matches, split the query into
    # keywords. If there is still no match, fall back to fuzzy matching.

    def string_to_md5(self, string: str) -> str:
        """Return the hex MD5 digest of *string* (UTF-8 encoded)."""
        md5_hash = hashlib.md5()
        md5_hash.update(string.encode('utf-8'))
        md5_value = md5_hash.hexdigest()
        return md5_value

    # Inspect the search string; True means a fuzzy search is worth running.
    # With the current empty charset this is True once the text contains
    # more than two characters.
    def check_search_text(self, search_text: str):
        num = 0
        charset = "" #string.digits+string.ascii_letters
        for c in search_text:
            if c not in charset:
                num += 1
            if num > 2:
                return True
        return False

    # Compute a "virtual" length in which ASCII letters/digits count less
    def get_virtual_search_len(self, search_text: str) -> float:
        l = 0
        charset = string.digits+string.ascii_letters
        for c in search_text:
            if c in charset:
                l += 0.7 # English words are long, so down-weight English characters
            else:
                l += 1
        return l

    # str1 is expected to be the shorter string. Scores str1 against str2
    # via the longest common substring length + longest common subsequence length.
    def max_sub_str_len(self, str1, str2):
        # Modified longest-common-subsequence logic
        str1 = str1.lower()
        str2 = str2.lower()
        m = len(str1)
        n = len(str2)
        # 2-D table holding LCS lengths
        dp = [[0] * (n + 1) for _ in range(m + 1)]

        # Fill the dp table
        squence_len = 0 # longest common subsequence length
        substring_len = 0 # longest common substring length
        temp_len = 0
        best_i = 0
        best_j = 0
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if str1[i - 1] == str2[j - 1]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                    if dp[i][j] > squence_len:
                        squence_len = dp[i][j]
                        best_i = i # remember where the longest subsequence ends, shortening the backtrack
                        best_j = j
                else:
                    dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
        i = best_i
        j = best_j
        temp_len = 0
        adhesion_len = 0 # adhesion accumulator
        adhesion = 0 # adhesion: how close together the matched subsequence characters sit
        temp_len2 = 0
        while i > 0 and j > 0:
            temp_len2 += 1
            if str1[i - 1] == str2[j - 1]:
                temp_len += 1
                i -= 1
                j -= 1
                adhesion_len += 1 / temp_len2
                temp_len2 = 0
                continue
            # Otherwise the matched run is no longer contiguous
            substring_len = max(substring_len, temp_len)
            temp_len = 0
            if dp[i - 1][j] == 0 and dp[i][j-1] == 0: # a zero cell means the match is already exhausted
                break
            if dp[i - 1][j] > dp[i][j - 1]:
                i -= 1
            else:
                j -= 1
        if squence_len < 2:
            return squence_len
        adhesion = adhesion_len / squence_len # compute the adhesion ratio
        discrete_len = best_j - j # span in str2 from the first to the last matched character
        dispersion = discrete_len / squence_len # dispersion: larger means a worse match
        substring_len = max(substring_len, temp_len)
        score = squence_len * (adhesion / dispersion)**0.5
        # if "jinjia" in str1 and "nemanja" in str2:
        #     print(adhesion)
        #     print(dispersion)
        #     print(squence_len)
        return score #(squence_len * adhesion / dispersion + substring_len ) // 2 # earlier formula (original trailing comment was left unfinished)

    # Compute the score and return the best-matching paragraphs
    def compute_score(self, search_text, note):
        search_text_len = len(search_text)
        score = 0

        # Score the title first
        l = self.max_sub_str_len(search_text, note["title"])
        if l == search_text_len:
            return -1, [] # full title match: return immediately
        score = l

        # Then score each paragraph
        best_paras = []
        for p in note["paragraphs"]:
            l = self.max_sub_str_len(search_text, p)
            if l == search_text_len:
                return -2, [p] # full paragraph match: return just this one paragraph
            if l > score:
                score = l
                best_paras = [p] # keep only the single best paragraph for now
        
        return score, best_paras

    # Fuzzy search
    def fuzz_search(self, search_text, notes):
        result = []
        prefect_title_result = []
        prefect_para_result = []
        for note in notes:
            if len(prefect_title_result) + len(prefect_para_result) + len(result) >= self.MAX_RESULT_NUM:
                break
            score, paras = self.compute_score(search_text, note)
            if score >= 0 and score < len(search_text) * 0.667:  # drop poor matches to shrink the final sort; with adhesion factored in, a lower tolerance is acceptable here
                continue
            item = {
                "hash": note["id"],
                "title": note["title"],
                "best_paras": paras,
                "score": round(score / len(search_text) if score > 0 else score, 2)
            }
            if score == -1:
                prefect_title_result.append(item)
            elif score == -2:
                prefect_para_result.append(item)
            else:
                result.append(item)
        # Sort the results and keep only the best ones
        # result = quick_sort(result)
        result.sort(key=lambda x: x["score"], reverse=True)
        temp = self.MAX_RESULT_NUM - len(prefect_title_result) - len(prefect_para_result)
        if len(result) > temp:
            result = result[:temp]
        result = prefect_title_result + prefect_para_result + result
        return result

    # Accurate search
    def accurate_search(self, search_text, notes, runtime):
        # Match search_text without stripping spaces; a full title match scores -1.
        # Match search_text without stripping spaces; a full paragraph match scores -2.
        # Otherwise match individual keywords (spaces filtered); no keyword matching scores 0.
        # Keyword weighting: space-separated keywords get normal weight; keywords
        # derived via jieba segmentation get slightly lower weight.
        # Keywords that matched anywhere in the note also add a small bonus.
        expectation_score = 0.68
        search_len_limit = len(search_text) / 2
        keywords = search_text.split(" ")
        keywords_len = len(keywords)
        if len(search_text.replace(" ", "")) > 3:
            for k in keywords:
                fenjie = list(jieba.cut(k)) # semantic word segmentation
                if len(fenjie) == 1:
                    continue
                for kk in fenjie:
                    # if len(kk) == 1: # single-character keywords stay: filtering them would usually drop the score enough to trigger fuzzy search anyway
                    #     continue
                    if kk not in keywords:
                        keywords.append(kk) # append the newly derived keywords

        search_text_vir_len = self.get_virtual_search_len(search_text.replace(" ", "")) # exact search ignores spaces, so strip them before computing the virtual length
        keywords_vir_lens = [self.get_virtual_search_len(key) for key in keywords]
        print(keywords)  # NOTE(review): debug print left in — consider removing
        result = []
        prefect_result = []
        for h,note in notes.items():
            if len(prefect_result) >= self.MAX_RESULT_NUM: # stop once exact results hit the cap
                break
            score = 0
            best_paras = []
            note_matched_word = "" # all keywords matched anywhere in this note
            # Match the title
            title = note["title"].lower()
            if search_text in title:
                score = -1
            elif len(keywords) > 1:
                matched_word = ""
                for i in range(0, keywords_len):
                    key = keywords[i]
                    if key in title:
                        score += keywords_vir_lens[i]
                        matched_word += key + " "
                        if key not in note_matched_word:
                            note_matched_word += key + " "
                if score < search_text_vir_len * expectation_score: # still unsatisfying: retry with the jieba-derived keywords
                    for i in range(keywords_len, len(keywords)):
                        key = keywords[i]
                        if key not in matched_word and key in title:
                            score += keywords_vir_lens[i] * 0.9 # derived keywords carry slightly less weight
                            if key not in note_matched_word:
                                note_matched_word += key + " "
            # Match the paragraphs
            if score != -1:
                for p in note["paragraphs"]:
                    if len(p) <= search_len_limit: # paragraphs that are too short cannot match well; skip them
                        continue
                    p = p.lower()
                    if search_text in p:
                        best_paras = [p]
                        score = -2
                        break
                    elif len(keywords) > 1:
                        temp = 0
                        matched_word = ""
                        for i in range(0, keywords_len):
                            key = keywords[i]
                            if key in p:
                                temp += keywords_vir_lens[i]
                                matched_word += key + " "
                                if key not in note_matched_word:
                                    note_matched_word += key + " "
                        if temp < search_text_vir_len * expectation_score: # still unsatisfying: retry with the jieba-derived keywords
                            for i in range(keywords_len, len(keywords)):
                                key = keywords[i]
                                if key not in matched_word and key in p:
                                    temp += keywords_vir_lens[i] * 0.9 # derived keywords carry slightly less weight
                                    if key not in note_matched_word:
                                        note_matched_word += key + " "
                        if temp > score:
                            score = temp
                            best_paras = [p]
                # Blend the final score with whole-note keyword coverage
                if score > 0:
                    score = score * 0.8 + self.get_virtual_search_len(note_matched_word.replace(" ", "")) * 0.2
            # Judge the match result
            if score >= 0 and score < search_text_vir_len * expectation_score: # match is too poor
                if score >= 0.5: # some similarity: keep as a candidate for fuzzy search
                    runtime["matchnote"].append(note)
                continue
            item = {
                "hash": h,
                "title": title,
                "best_paras": best_paras
            }
            if score == -1:
                item["score"] = score
                item["matched_keywords"] = [search_text]
                prefect_result.insert(0, item)
            elif score == -2:
                item["score"] = score
                item["matched_keywords"] = [search_text]
                prefect_result.append(item)
            else:
                item["score"] = round(score / search_text_vir_len, 2)
                item["matched_keywords"] = note_matched_word.strip().split(" ")
                result.append(item)
        # Return the collected results
        # result = quick_sort(result)
        result.sort(key=lambda x: x["score"], reverse=True)
        temp = self.MAX_RESULT_NUM - len(prefect_result)
        if len(result) > temp:
            result = result[:temp]
        result = prefect_result + result
        return result, keywords

    # Search entry point
    def search(self, search_text: str):
        if len(search_text) > 50:
            search_text = search_text[:50]
        search_text = re.sub(r"\s+", " ", search_text) # collapse whitespace runs into a single space
        search_text = search_text.strip().lower()
        if len(search_text) == 0:
            return []
        runtime = {
            "matchnote": []
        }
        result, keywords = self.accurate_search(search_text, self.notes, runtime)
        if self.check_search_text(search_text):
            result2 = []
            if len(runtime["matchnote"]) > 0:
                result2 = self.fuzz_search(search_text.replace(" ", ""), runtime["matchnote"]) # exact search with spaces found nothing, so spaces are unlikely to help here; dropping them keeps unmatched spaces from dragging the score down
            elif len(result) == 0:
                result2 = self.fuzz_search(search_text.replace(" ", ""), self.notes.values())
            for note in result2:
                note["matched_keywords"] = keywords # attach matched_keywords so the shape matches accurate-search results
            result = result + result2
            return result
        else:
            return result

# =======================================================================

if __name__ == '__main__':
    # Manual smoke test: run one query, print timing and the results.
    def search_test(search_text, mynotes):
        # perf_counter() is the documented clock for measuring elapsed time:
        # it is monotonic and higher-resolution than time.time(), which can
        # jump when the system clock is adjusted.
        start_time = time.perf_counter()
        result = mynotes.search(search_text)
        execution_time = time.perf_counter() - start_time
        print("执行时间：", execution_time, "秒")
        for item in result:
            print( "%s【%s】%s" % (item["score"], item["title"], "\n".join(item["best_paras"])) )
        print("==================================================================")


    # Note root: default path, overridable by the first CLI argument.
    note_base_path = "E:/Note"
    if len(sys.argv) > 1:
        note_base_path = sys.argv[1]
    mynotes = MyNote(note_base_path)
    mynotes.cache_notes()

    # Search tests
    # Exact matches
    search_test("minicom", mynotes)
    search_test("webshell", mynotes)
    search_test("树莓派模组拨号", mynotes)
    # Fuzzy matches
    search_test("linux字符串操作", mynotes)
    search_test("快速开启Web", mynotes)
    search_test("开启Web", mynotes)
    # Misspellings can still match
    search_test("威力同步", mynotes)
    search_test("webshall", mynotes)
    search_test("jinjia", mynotes)