# D:\code\pythonProject1\database
# youth
import ttkbootstrap
import os
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import tkinter as tk
from tkinter import scrolledtext
import jieba


class SearchEngineGUI:  # GUI界面
    """Tkinter GUI that searches all .txt files under a folder for a query.

    The window offers two entry fields (folder path and query string) and a
    scrolled text area that lists the full path of every file whose tokens
    contain the query as a substring.
    """

    def __init__(self):
        # Build the main window and all widgets; search() is wired to the button.
        root = tk.Tk()
        self.root = root
        root.title("personal search engine 宋知行")
        self.root.geometry("800x500+1100+500")
        self.folder_path_label = tk.Label(self.root, text="文件夹路径:")
        self.folder_path_label.pack()
        self.folder_path_entry = tk.Entry(self.root, width=50)
        self.folder_path_entry.pack()
        self.search_query_label = tk.Label(self.root, text="检索内容: ")
        self.search_query_label.pack()
        self.search_query_entry = tk.Entry(self.root, width=50)
        self.search_query_entry.pack()
        self.search_button = tk.Button(self.root,
                                       text="检索",
                                       command=self.search)
        self.search_button.pack()
        self.results_path = scrolledtext.ScrolledText(self.root, width=70, height=15)
        self.results_path.pack()

    def read_files(self, folder_path):
        """Read every .txt file directly under *folder_path*.

        Returns a dict mapping filename -> file content (decoded as UTF-8).
        Raises OSError if the folder cannot be listed or a file cannot be read.
        """
        texts = {}
        for filename in os.listdir(folder_path):
            if filename.endswith(".txt"):
                with open(os.path.join(folder_path, filename), 'r', encoding='utf-8') as file:
                    texts[filename] = file.read()
        return texts

    def tokenize_text(self, texts):
        """Tokenize each document.

        Pure-ASCII documents go through NLTK's word_tokenize; anything with
        non-ASCII characters (e.g. Chinese) goes through jieba.

        Returns a dict mapping filename -> flat list of tokens.

        BUG FIX: the original tested ``text.isalpha()``, which is False for
        any text containing spaces or punctuation, so every realistic
        document was sent to jieba and the NLTK branch was dead.  It also
        wrapped each token list in an extra list, which forced triple-nested
        loops downstream.
        """
        tokenized_texts = {}
        for name, text in texts.items():
            if all(ord(ch) < 128 for ch in text):  # ASCII-only => English text
                tokenized_texts[name] = word_tokenize(text)
            else:
                tokenized_texts[name] = jieba.lcut(text)
        return tokenized_texts

    # 2. 构建词典
    def dictionary(self, tokenized_texts):
        """Remove stop words from every document's token list.

        Combines NLTK's English stop-word list with an optional local
        (Chinese) stop-word file.  Returns a dict mapping
        filename -> filtered token list.

        BUG FIX: the original looped with ``del word``, which only unbinds
        the loop variable and never removes anything from the lists, so all
        stop words were in fact kept.
        """
        stop_words = set(stopwords.words('english'))
        # Raw string: the original "D:\code\..." literal relied on invalid
        # escape sequences being kept verbatim (a DeprecationWarning).
        stopwords_path = r"D:\code\停用词.txt"
        try:
            with open(stopwords_path, 'r', encoding='utf-8') as file:
                for line in file:
                    stop_words.add(line.strip())
        except OSError:
            pass  # local stop-word file is optional; fall back to NLTK's list
        return {name: [word for word in words if word not in stop_words]
                for name, words in tokenized_texts.items()}

    def get_key(self, val, dict):
        """Return the first key in *dict* whose value equals *val*.

        Returns None when no value matches.  (The parameter name shadows the
        builtin ``dict``; kept for backward compatibility with callers using
        keyword arguments.)
        """
        for key, value in dict.items():
            if val == value:
                return key
        return None

    def search(self):
        """Button callback: list every file whose tokens contain the query.

        Reads the folder path and query from the entry widgets, shows the
        full path of each matching file in the results box (or an error /
        "no match" message), and returns the last displayed path when there
        were matches — mirroring the original behaviour.
        """
        folder_path = self.folder_path_entry.get()
        query = self.search_query_entry.get()

        if not folder_path or not query:
            self.results_path.delete(1.0, tk.END)
            self.results_path.insert(tk.END, "请提供文件夹路径和检索内容。")
            return

        texts = self.read_files(folder_path)  # filename -> raw text
        tokenized_texts = self.tokenize_text(texts)
        dictionary = self.dictionary(tokenized_texts)

        # A file matches when any of its tokens contains the query substring.
        matched = {name
                   for name, words in dictionary.items()
                   if any(query in word for word in words)}

        # Sort for a deterministic display order (the original iterated a set).
        results = [os.path.join(folder_path, name) for name in sorted(matched)]
        self.results_path.delete(1.0, tk.END)

        if results:
            for file_path in results:
                self.results_path.insert(tk.END, f"文件:{file_path}\n")
            return file_path
        self.results_path.insert(tk.END, "未找到匹配项。")


if __name__ == "__main__":
    # Build the GUI and hand control over to Tk's event loop.
    app = SearchEngineGUI()
    app.root.mainloop()
# def search(self):
#     folder_path = self.folder_path_entry.get()
#
#     query = self.search_query_entry.get()
#
#     if not folder_path or not query:
#         self.results_text.delete(1.0, tk.END)
#         self.results_text.insert(tk.END, "请提供文件夹路径和检索内容。")
#         return
#
#     texts = self.read_files(folder_path)
#     tokenized_texts = self.tokenize_text(texts)
#
#
#
#     results = []
#     for i, tokens in enumerate(tokenized_texts):
#         if query in tokens:
#             results.append(f"结果{i + 1}: {texts[i]}")
#     self.results_text.delete(1.0, tk.END)
#
#     if results:
#         for result in results:
#             self.results_text.insert(tk.END, result + "\n")
#     else:
#         self.results_text.insert(tk.END, "未找到匹配项。")
