from typing import List

import uvicorn
from fastapi import FastAPI, Query
from pydantic import BaseModel
from transformers import BertTokenizer, BertModel
import torch
import thulac
from elasticsearch import Elasticsearch
import time
import spacy

app = FastAPI()

# Thulac Chinese segmenter/POS tagger (loads its model at import time;
# used by get_entity_by_thulac)
thu = thulac.thulac()

# SimCSE sentence-embedding model and tokenizer, pulled from the HF hub.
simcse_sup_path = "hellonlp/simcse-roberta-base-zh"
tokenizer = BertTokenizer.from_pretrained(simcse_sup_path)
model = BertModel.from_pretrained(simcse_sup_path)
# Elasticsearch index that stores the precomputed question vectors.
index_name = "es_ob_file_vector_new"

# spaCy Chinese pipeline used for NOUN/VERB keyword extraction.
nlp = spacy.load('zh_core_web_sm')


def extract_words_from_text(filename):
    """Read a UTF-8 file of ``word,number`` lines and return the words.

    Only the text before the first comma of each non-empty line is kept.
    The original tuple-unpacking (``word, _ = line.split(',')``) crashed
    with a ValueError on blank lines or lines containing more (or fewer)
    than one comma; ``str.partition`` never raises, so malformed lines are
    now tolerated.

    Args:
        filename: path to the vocabulary file.

    Returns:
        list[str]: words stripped of surrounding whitespace.
    """
    words = []
    with open(filename, 'r', encoding='utf-8') as file:
        for line in file:
            stripped = line.strip()
            if not stripped:
                continue  # skip blank lines instead of crashing
            # Everything before the first comma is the word; extra commas
            # (or a missing comma) no longer break the parse.
            word, _, _ = stripped.partition(',')
            word = word.strip()
            if word:
                words.append(word)
    return words


proper_nouns = extract_words_from_text("result1.txt")
# Teach the spaCy pkuseg tokenizer the domain vocabulary so multi-character
# proper nouns are kept as single tokens during segmentation.
nlp.tokenizer.pkuseg_update_user_dict(proper_nouns)

# Elasticsearch connection
es = Elasticsearch([
    {
        'host': '192.168.124.248',
        'port': 9200,
        'scheme': 'http'  # use http or https depending on the deployment
    }
])


# Sentence -> embedding via the SimCSE BERT encoder.
def get_vector_simcse(sentence):
    """Return the [CLS] embedding of *sentence* as a 1-D numpy array.

    The sentence is tokenized (truncated to 512 tokens), run through the
    encoder with gradients disabled, and the hidden state of the first
    token is used as the sentence vector.
    """
    token_ids = tokenizer.encode(sentence, max_length=512, truncation=True)
    batch = torch.tensor(token_ids).unsqueeze(0)
    with torch.no_grad():
        encoded = model(batch)
    cls_state = encoded.last_hidden_state[:, 0]
    return cls_state.squeeze(0).numpy()


def get_entity_by_thulac(sentence):
    """Extract noun-like words from *sentence* with the Thulac tagger.

    Keeps tokens whose POS tag starts with 'n' (any noun subtype — note
    'nz' is already covered by the 'n' prefix) or 'vn' (verbal nouns).
    """
    nouns = []
    for word, pos in thu.cut(sentence):
        if pos.startswith(('n', 'nz', 'vn')):
            nouns.append(word)
    return nouns


def get_entity_by_spacy(sentence):
    """Return the NOUN and VERB tokens of *sentence* via the spaCy pipeline."""
    doc = nlp(sentence)
    return [token.text for token in doc if token.pos_ in ['NOUN', 'VERB']]

# Request body model
class QueryInput(BaseModel):
    # The user's natural-language question.
    question: str


# Response body model
class SearchResult(BaseModel):
    # The matched question text from the index.
    question: str
    # Elasticsearch relevance score of the match.
    confidence: float


@app.get("/search/keyword/batch")
async def key_word_search_batch(query: str = Query(..., title="Input question"),
                                ids: str = Query(..., title="IDs",
                                                 description="A list of IDs related to the query")):
    if query == "":
        return []
    if ids:  # 检查ids是否为非空字符串
        ids_list = ids.split(',')
    else:
        ids_list = []  # 如果ids是空字符串，则ids_list直接设置为空列表

    entities = get_entity_by_spacy(query)

    # body = {
    #     "query": {
    #         "bool": {
    #             "filter": [{"terms": {"detectionType.keyword": ids_list}}] if ids_list else [],
    #             "should": [{"match": {"description": entity}} for entity in entities],
    #             # 如果没有指定minimum_should_match，则默认为1（即至少匹配一个should子句）
    #             # "minimum_should_match": 1  # 如果需要显式指定
    #         }
    #     },
    #     "_source": ["ask", "description", "detectionType"]
    # }

    body = {
        "query": {
            "bool": {
                "filter": [{"terms": {"detectionType.keyword": ids_list}}] if ids_list else [],
                "should": [
                              {"match": {"ask": entity}} for entity in entities
                          ] + [
                              {"match": {"description": entity}} for entity in entities
                          ],
            }
        },
        "_source": ["ask", "description", "detectionType"],
        "sort": [
            {
                "ask.keyword": {
                    "order": "asc"
                }
            },
            {
                "description.keyword": {
                    "order": "asc"
                }
            }
        ]
    }

    if not body["query"]["bool"]["filter"]:
        del body["query"]["bool"]["filter"]

    sem_search = es.search(
        index=index_name,
        body=body
    )
    search_results = []
    for hit in sem_search["hits"]["hits"]:
        question = hit["_source"]["ask"].encode('utf-8').decode('utf-8')
        description = hit["_source"]["description"].encode('utf-8').decode('utf-8')
        detection_type = hit["_source"]["detectionType"]
        if entities and any(entity in question for entity in entities):
            result = {"ask": question, "description": description, "detectionType": detection_type}
            search_results.append(result)

    return search_results


# Define endpoint for semantic search
@app.get("/search/batch")
async def semantic_search_batch(query: str = Query(..., title="Input question"),
                                ids: str = Query(..., title="IDs",
                                                 description="A list of IDs related to the query")):
    """SimCSE cosine-similarity search, optionally filtered by detection type.

    The query is embedded with the SimCSE model and scored against the
    stored ``question_vector`` field via a script_score query; when *ids*
    (comma-separated) is non-empty the candidates are filtered by
    ``detectionType.keyword`` first.  Hits are then post-filtered so the
    "ask" text contains at least one spaCy NOUN/VERB token of the query.

    Returns a list of {"ask", "description", "detectionType"} dicts.
    """
    if query == "":
        return []

    # Comma-separated detection types; empty entries are dropped so that
    # inputs like "," do not produce a filter of empty strings (this matches
    # the original any(ids_list) guard).
    ids_list = [part for part in ids.split(',') if part] if ids else []

    # NOTE: the original measured encode time into unused variables; the
    # dead timing code has been removed.
    question_embedding = get_vector_simcse(query)

    query_body = {
        "query": {
            "script_score": {
                "query": {"match_all": {}},
                "script": {
                    # cosineSimilarity is in [-1, 1]; +1.0 keeps scores
                    # non-negative as ES requires.
                    "source": "cosineSimilarity(params.query_vector, doc['question_vector']) + 1.0",
                    "params": {"query_vector": question_embedding.tolist()}
                }
            }
        },
        "_source": ["ask", "description", "detectionType"],
        "sort": [{"_score": {"order": "desc"}}]
    }

    if ids_list:
        # Wrap the score query so the detectionType filter is applied first.
        query_body["query"] = {
            "bool": {
                "filter": [{"terms": {"detectionType.keyword": ids_list}}],
                "must": [query_body["query"]]
            }
        }

    # Semantic search using script_score
    sem_search = es.search(index=index_name, body=query_body)

    entities = get_entity_by_spacy(query)

    # Extract and format results
    search_results = []
    for hit in sem_search["hits"]["hits"]:
        source = hit["_source"]
        question = source["ask"]
        # Post-filter: require the question to contain an extracted token.
        if entities and any(entity in question for entity in entities):
            search_results.append({
                "ask": question,
                "description": source["description"],
                "detectionType": source["detectionType"],
            })

    return search_results


# Define endpoint for semantic search
@app.get("/search")
async def semantic_search(query: str = Query(..., title="Input question")):
    """Top-30 SimCSE cosine-similarity search over the question index.

    Returns a list of {"question", "confidence"} dicts for hits whose
    "ask" text contains at least one spaCy NOUN/VERB token of the query.
    """
    # Guard added for consistency with the batch endpoints: an empty query
    # yields no tokens, so the post-filter would return [] anyway — skip
    # the model call and ES round-trip.
    if query == "":
        return []

    question_embedding = get_vector_simcse(query)

    # Semantic search using script_score
    sem_search = es.search(
        index=index_name,
        body={
            "query": {
                "script_score": {
                    "query": {"match_all": {}},
                    "script": {
                        # +1.0 keeps cosineSimilarity scores non-negative.
                        "source": "cosineSimilarity(params.query_vector, doc['question_vector']) + 1.0",
                        "params": {"query_vector": question_embedding.tolist()}
                    }
                }
            },
            "_source": ["ask"],
            "size": 30,
            "sort": [{"_score": {"order": "desc"}}]
        }
    )

    entities = get_entity_by_spacy(query)

    # Extract and format results
    search_results = []
    for hit in sem_search["hits"]["hits"]:
        # The former .encode('utf-8').decode('utf-8') round-trip was a no-op
        # on str values and has been removed.
        question = hit["_source"]["ask"]
        # Post-filter: require the question to contain an extracted token.
        if entities and any(entity in question for entity in entities):
            search_results.append({"question": question, "confidence": hit["_score"]})

    return search_results


# Start the FastAPI server (development entry point).
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8100)
