import glob
import jieba
import re
import threading
import hashlib
import json
from whoosh.qparser import QueryParser
from whoosh.qparser import MultifieldParser
from whoosh.index import open_dir
from socket import *
import select
from whoosh.query import compound, Term

'''
Source:
'''

class WhooshSearch:
    """TCP search server backed by a Whoosh index with jieba segmentation.

    Protocol (both directions): a 4-byte little-endian length header
    followed by a UTF-8 JSON body. Requests carry the keys
    "word", "page", "cnt" and "detail"; the response is the JSON
    produced by :meth:`search`.
    """

    def __init__(self):
        # The Whoosh index must already exist in ./indexdir.
        self.ix = open_dir("indexdir")
        self.tcp_socket_server = socket(AF_INET, SOCK_STREAM)
        # Allow quick restarts without "address already in use" errors.
        self.tcp_socket_server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.tcp_socket_server.bind(('', 8090))
        self.tcp_socket_server.listen(128)

    def __del__(self):
        self.tcp_socket_server.close()

    def search(self, searcher, word, page, cnt, detail):
        """Run a paged full-text query and return the result as a JSON string.

        :param searcher: an open Whoosh searcher
        :param word: raw query string; segmented with jieba full mode
        :param page: 1-based page number
        :param cnt: hits per page
        :param detail: 1 = search title+content and include a content
            excerpt; anything else = title-only search, titles only
        :return: JSON string ``{"all": <hits on page>, "data": [...]}``
        """
        # The index stores lowercase tokens, so lowercase before segmenting.
        title_query = []
        content_query = []
        for token in jieba.cut(word.lower(), cut_all=True):
            # Full-mode segmentation can emit empty/whitespace tokens; drop them.
            if not token or token.isspace():
                continue
            print(token)
            title_query.append(Term('title', token))
            content_query.append(Term('content', token))
        if detail == 1:
            # Match any token in either the title or the content field.
            query = compound.Or([compound.Or(title_query),
                                 compound.Or(content_query)])
        else:
            # Title-only search for the lightweight listing view.
            query = compound.Or(title_query)
        results = searcher.search_page(query, page, pagelen=cnt)
        res = {"all": len(results), "data": []}
        print("size :" + str(results))
        for hit in results:
            d = {"title": hit["title"]}
            if detail == 1:
                # Truncated excerpt instead of Whoosh highlights.
                d["content"] = hit["content"][0:400] + "..."
            res["data"].append(d)
        return json.dumps(res)

    def _recv_exact(self, sock, size):
        """Read exactly ``size`` bytes from ``sock``.

        Returns fewer bytes only if the peer closed the connection early.
        """
        buf = b""
        while len(buf) < size:
            chunk = sock.recv(size - len(buf))
            if not chunk:  # peer closed before sending everything
                break
            buf += chunk
        return buf

    def process(self):
        """Accept clients forever, answering one framed request per connection."""
        jieba.load_userdict("./jieba_dict/user.dict")
        with self.ix.searcher() as searcher:
            # Warm-up query so the first real client does not pay the
            # jieba/Whoosh initialization cost.
            self.search(searcher, "银行", 1, 10, 1)
            while True:
                socket_client = None
                try:
                    socket_client, ip_port = self.tcp_socket_server.accept()
                    print("client:", ip_port)
                    socket_client.setblocking(0)
                    # Wait up to 3s for the request; drop silent clients
                    # instead of crashing on a non-blocking recv.
                    ready, _, _ = select.select([socket_client], [], [], 3)
                    if not ready:
                        continue

                    head_data = self._recv_exact(socket_client, 4)
                    if len(head_data) < 4:
                        continue  # peer closed before sending the header
                    # int.from_bytes is a classmethod; no int() instance needed.
                    body_len = int.from_bytes(head_data, byteorder='little')
                    print("head len：%s" % str(body_len))

                    res_data = self._recv_exact(socket_client, body_len)
                    query_json = json.loads(res_data.decode("utf-8"))

                    data = self.search(searcher, query_json["word"],
                                       query_json["page"], query_json["cnt"],
                                       query_json["detail"])
                    payload = data.encode("utf-8")
                    print("send：%d" % len(payload))
                    socket_client.sendall(len(payload).to_bytes(4, byteorder="little"))
                    socket_client.sendall(payload)
                except Exception as e:
                    # Best-effort server: log and keep serving other clients.
                    print(str(e))
                finally:
                    # Always release the client socket, even on errors
                    # (the original leaked it on any exception).
                    if socket_client is not None:
                        socket_client.close()

if __name__ == '__main__':
    # Build the server and serve requests until interrupted.
    server = WhooshSearch()
    server.process()
