# Simple folder-based text search engine with a Tkinter GUI.
# Original location: D:\code\pythonProject1\database  (author tag: youth)

import os
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import tkinter as tk
from tkinter import scrolledtext
import jieba

class SearchEngineGUI:
    """Tkinter GUI for a simple keyword search over .txt files in a folder.

    The user supplies a folder path and a query string; every ``.txt`` file
    in the folder whose tokenized text matches the query is shown in the
    results pane.
    """

    def __init__(self):
        """Build the main window and all widgets; does not start the event loop."""
        root = tk.Tk()
        self.root = root
        root.title("**** songの搜索引擎 ****")
        # 800x500 window placed at screen offset (1100, 500)
        self.root.geometry("800x500+1100+500")
        # Folder-path input row
        self.folder_path_label = tk.Label(self.root, text="文件夹路径:")
        self.folder_path_label.pack()
        self.folder_path_entry = tk.Entry(self.root, width=50)
        self.folder_path_entry.pack()
        # Query input row
        self.search_query_label = tk.Label(self.root, text="检索内容: ")
        self.search_query_label.pack()
        self.search_query_entry = tk.Entry(self.root, width=50)
        self.search_query_entry.pack()
        self.search_button = tk.Button(self.root,
                                       text="检索",
                                       command=self.search)
        self.search_button.pack()
        # Scrollable output area for search results
        self.results_text = scrolledtext.ScrolledText(self.root, width=70, height=15)
        self.results_text.pack()

    def read_files(self, folder_path):
        """Return the contents of every ``.txt`` file in *folder_path*.

        Files are read as UTF-8 and returned in sorted filename order so
        result numbering is deterministic (``os.listdir`` order is arbitrary).

        Raises:
            OSError: if *folder_path* does not exist or cannot be listed.
        """
        texts = []
        for filename in sorted(os.listdir(folder_path)):
            if filename.endswith(".txt"):
                with open(os.path.join(folder_path, filename), 'r', encoding='utf-8') as file:
                    texts.append(file.read())
        return texts

    def tokenize_text(self, texts, is_chinese=True):
        """Tokenize each text: jieba for Chinese (default), NLTK for English."""
        tokenized_texts = []
        for text in texts:
            if is_chinese:
                tokenized_texts.append(jieba.lcut(text))
            else:
                tokenized_texts.append(word_tokenize(text))
        return tokenized_texts

    # 2. Build the dictionary (inverted index)
    def build_dictionary(self, tokenized_texts):
        """Build an inverted index: word -> list of ``file_<i>_<pos>`` tags.

        ``<i>`` is the index of the text in *tokenized_texts* and ``<pos>``
        the token's position within that text.  The original version never
        recorded a word's first occurrence (the creating branch appended
        nothing), used ``len(dictionary)`` as a fake file id, and used
        ``text.index(word)`` which always returns the first position.

        NOTE(review): stopword filtering uses the English NLTK list even
        though the default tokenizer is Chinese — confirm this is intended.
        """
        dictionary = {}
        stop_words = set(stopwords.words('english'))  # stopword list
        for file_idx, tokens in enumerate(tokenized_texts):
            for pos, word in enumerate(tokens):
                if not word.isalpha() or word in stop_words:
                    continue
                # Record every occurrence with its true file index and position.
                dictionary.setdefault(word, []).append(
                    'file_' + str(file_idx) + '_' + str(pos))
        return dictionary

    def search(self):
        """Button callback: read the folder, match the query, display results."""
        folder_path = self.folder_path_entry.get()
        query = self.search_query_entry.get()

        if not folder_path or not query:
            self.results_text.delete(1.0, tk.END)
            self.results_text.insert(tk.END, "请提供文件夹路径和检索内容。")
            return

        # An invalid path previously raised an uncaught OSError inside the
        # Tk callback; report it in the results pane instead.
        try:
            texts = self.read_files(folder_path)
        except OSError:
            self.results_text.delete(1.0, tk.END)
            self.results_text.insert(tk.END, "无法读取文件夹,请检查路径。")
            return

        tokenized_texts = self.tokenize_text(texts)

        results = []
        for i, tokens in enumerate(tokenized_texts):
            # Match an exact token, or fall back to a raw substring match so
            # multi-word queries (never equal to one token) can still hit.
            if query in tokens or query in texts[i]:
                results.append(f"结果{i + 1}: {texts[i]}")
        self.results_text.delete(1.0, tk.END)

        if results:
            for result in results:
                self.results_text.insert(tk.END, result + "\n")
        else:
            self.results_text.insert(tk.END, "未找到匹配项。")




if __name__ == "__main__":
    a = SearchEngineGUI()
    a.root.mainloop()
