import re
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor

import enchant
import nltk
from elasticsearch.helpers import bulk
from flask import Blueprint, request
from nltk import pos_tag, word_tokenize
from nltk.corpus import wordnet
import NLPTool.NLPTool
from Model.models import User, UserRole
from Utils.APIResponse import APIResponse
from Utils.TokenUtil import token_required
from elasticsearch import Elasticsearch
import datetime
import json
import pandas
import threading

from conf import es_url, code_time_csv

# Blueprint for all /search* endpoints registered by the app factory.
search_blue = Blueprint("search", __name__)
# Shared Elasticsearch client; es_url comes from conf.
es_client = Elasticsearch(hosts=es_url)
# Create locks for thread-safe updates
# NOTE(review): none of these four locks are used anywhere in this module —
# presumably intended for code elsewhere; confirm before removing.
module_lock = threading.Lock()
teacher_lock = threading.Lock()
pg_lock = threading.Lock()
ug_lock = threading.Lock()
# Initialize the US-English dictionary (used for spell-checking word variations)
en_dict = enchant.Dict("en_US")


@search_blue.route('/search', methods=['POST'])
@token_required
def search(request_user_id):
    """POST /search: full-text search over the 'conversation' index.

    Requires a non-empty ``search_term`` in the JSON body; all other
    parameters are optional filters/pagination handled by the helpers.
    """
    print(request_user_id)
    try:
        params = _get_search_params(request)

        # Reject requests without the mandatory search term.
        if not params["search_term"]:
            return APIResponse.error({'error': 'Missing search_term parameter.'}, status_code=400)

        es_query = _construct_query(params)
        es_response = es_client.search(index="conversation", body=es_query)
        payload = _process_search_results(params, es_response)
        return APIResponse.success(data=payload)

    except (json.JSONDecodeError, ValueError) as e:
        # Malformed request body.
        return APIResponse.error({'error': 'Invalid JSON format: ' + str(e)}, status_code=400)

    except Exception as e:
        # Anything else (ES connectivity, unexpected data shapes, ...).
        return APIResponse.error({'error': 'Error searching: ' + str(e)}, status_code=500)


def _get_search_params(request):
    search_params = {
        "search_term": request.json.get("search_term"),
        "chat_id": request.json.get("chat_id"),
        "talker_name": request.json.get("talker_name"),
        "start_time": request.json.get("start_time"),
        "end_time": request.json.get("end_time"),
        "max_time_diff": request.json.get("max_time_diff", None),
        "page": request.json.get("page", 1),
        "per_page": request.json.get("per_page", 10),
        "module": request.json.get("module"),
        "degree": request.json.get("degree"),
        "phrase": request.json.get("phrase"),
        "pause_time": request.json.get("pause_time")
    }
    return search_params


def _construct_query(search_params):
    query = {
        "query": {
            "bool": {
                "must": [
                    {
                        "wildcard": {
                            "content": {
                                "value": search_params['search_term'] + "*"
                            }
                        }

                    }
                ],
                "filter": []

            }
        },

        "aggs": {
            "total_results": {
                "value_count": {
                    "field": "id"  # 使用任意唯一字段计算总结果数
                }
            }
        },
        "from": (search_params["page"] - 1) * search_params["per_page"],
        "size": search_params["per_page"]
    }

    if search_params["talker_name"]:
        query["query"]["bool"]["filter"].append({"term": {"talker_name": search_params["talker_name"]}})

    if search_params["chat_id"]:
        query["query"]["bool"]["filter"].append({"term": {"chat_id": search_params["chat_id"]}})
    if search_params["module"]:
        query["query"]["bool"]["filter"].append({"match": {"teacher_info.module": search_params["module"]}})
    if search_params["degree"]:
        query["query"]["bool"]["filter"].append({"match": {"teacher_info.degree": search_params["degree"]}})
    if search_params["phrase"]:
        query["query"]["bool"]["filter"].append({"match_phrase": {
            "content": search_params["phrase"]
        }})
    if search_params["pause_time"]:
        query["query"]["bool"]["filter"].append({"range": {"pause_time": {"gt": search_params["pause_time"]}}})

    if search_params["start_time"] and search_params["end_time"]:
        query["query"]["bool"]["filter"].append({"range": {"start_time": {"gte": search_params["start_time"]}}})
        query["query"]["bool"]["filter"].append({"range": {"end_time": {"lte": search_params["end_time"]}}})

    return query


def _construct_update_query(search_params):
    query = {
        "query": {
            "bool": {
                "must": [
                    {
                        "wildcard": {
                            "content": {
                                "value": search_params['search_term'] + "*"
                            }
                        }

                    }
                ],
                "filter": []

            }
        },

        "aggs": {
            "total_results": {
                "value_count": {
                    "field": "id"  # 使用任意唯一字段计算总结果数
                }
            }
        },
    }
    return query


def _process_search_results(search_params, response):
    hits = response["hits"]["hits"]
    total_results = response["aggregations"]["total_results"]["value"]
    total_pages = (total_results + search_params["per_page"] - 1) // search_params["per_page"]
    search_results = [hit["_source"] for hit in hits]

    # Filter results based on the maximum time difference
    filtered_results = []
    for result in search_results:
        start_time_result = datetime.datetime.strptime(result["start_time"], "%H:%M:%S")
        end_time_result = datetime.datetime.strptime(result["end_time"], "%H:%M:%S")
        time_diff = (end_time_result - start_time_result).total_seconds()
        if search_params["max_time_diff"] is None or time_diff <= search_params["max_time_diff"]:
            filtered_results.append(result)

    # Append additional data to search results
    for result in filtered_results:
        result['context_documents'] = get_context_documents('conversation', result['id'], 2)

    return {
        "total_results": total_results,
        "total_pages": total_pages,
        "search_results": search_results
    }


@search_blue.route('/search/analysis', methods=['POST'])
@token_required
def search_analysis(request_user_id):
    """POST /search/analysis: run (or fetch cached) NLP analysis of one document.

    Looks the document up by ``id`` in the 'conversation' index, then serves
    the analysis from a Redis hash cache when present, computing and caching
    it otherwise.
    """
    print(request_user_id)
    try:
        # Renamed from `id` to avoid shadowing the builtin.
        conversation_id = request.json.get("id")
        from app import redis
        dsl = {
            "query": {
                "match": {
                    "id": str(conversation_id)
                }
            }
        }
        # Raises IndexError (caught below as a 500) if no document matches.
        content = es_client.search(index='conversation', body=dsl)['hits']['hits'][0]['_source']['content']
        # Check if data is cached in Redis
        cached_data = redis.hget('t_conversation' + str(conversation_id), "t_conversation")
        if cached_data:
            result = json.loads(cached_data)
        else:
            # Perform NLP analysis if data is not cached
            result = NLPTool.NLPTool.analyze_content(content)
            json_data = json.dumps(result)
            # NOTE(review): no TTL is set on this hash key, so entries live
            # forever; consider redis.expire if the cache should age out.
            redis.hset('t_conversation' + str(conversation_id), "t_conversation", json_data)

        return APIResponse.success(result)

    except Exception as e:
        return APIResponse.error({'error': 'Error analyzing content: ' + str(e)}, status_code=500)


@search_blue.route('/search/teacher', methods=['GET'])
@token_required
def get_teacher_name(request_user_id):
    """GET /search/teacher: return teacher records (from the code/time CSV)
    and the distinct module names (from the teacher CSV)."""
    print(request_user_id)
    # Read the CSV file; one dict per row.
    df_code = pandas.read_csv(code_time_csv)
    teacher = df_code.to_dict(orient='records')
    # NOTE(review): `teacher_csv` is not defined or imported anywhere in this
    # module (only `es_url` and `code_time_csv` come from conf), so this line
    # raises NameError at request time unless the name is injected elsewhere —
    # presumably it should be imported from conf; confirm and fix.
    df_teacher = pandas.read_csv(teacher_csv)
    module = df_teacher['module'].tolist()
    return APIResponse.success({"teacher": teacher, "module": list(set(module))})


@search_blue.route('/search/code', methods=['GET'])
@token_required
def get_teacher_info(request_user_id):
    """GET /search/code: map each teacher code to its list of
    ``{module: degree}`` pairs read from the teacher CSV."""
    print(request_user_id)
    # NOTE(review): `teacher_csv` is not defined or imported in this module
    # (same issue as get_teacher_name) — this raises NameError at request
    # time; presumably it should come from conf. Confirm and fix.
    df_teacher = pandas.read_csv(teacher_csv)
    codes = list(set(df_teacher['code'].tolist()))
    result_dic = {}
    for code in codes:
        # All rows for this teacher code.
        teacher_data = df_teacher[df_teacher['code'] == code]
        teacher_info = [{row['module']: row['degree']} for _, row in teacher_data.iterrows()]
        result_dic[code] = teacher_info
    return APIResponse.success(result_dic)


@search_blue.route('/search/update', methods=['POST'])
@token_required
def update(request_user_id, scroll_duration='1m'):
    """POST /search/update: admin-only masking of a term across the index.

    Opens a scrolling search for documents whose content starts with
    ``search_term`` and delegates the bulk rewrite to
    :func:`_update_search_results`. ``scroll_duration`` keeps its default
    when invoked through the route.
    """
    try:
        payload = request.get_json()
        search_term = payload.get("search_term")
        request_user = User.query.get(request_user_id)

        # Only administrators may rewrite indexed content.
        if request_user.role.value != UserRole.ADMIN.value:
            return APIResponse.error({'error': 'no permission'}, status_code=401)

        if not search_term:
            return APIResponse.error({'error': 'Missing or empty search_term'}, status_code=400)

        search_params = {"search_term": search_term}

        prefix_clause = {
            "wildcard": {
                "content": {
                    "value": search_params['search_term'] + "*"
                }
            }
        }
        query = {"query": {"bool": {"must": [prefix_clause], "filter": []}}}

        response = es_client.search(index='conversation', scroll=scroll_duration, body=query)

        # Process search results and update index using bulk API.
        if _update_search_results(search_params, response):
            return APIResponse.success({'message': 'Search results updated successfully'})
        return APIResponse.error({'error': 'Failed to update search results'}, status_code=500)

    except json.JSONDecodeError as e:
        return APIResponse.error({'error': 'Invalid JSON format: ' + str(e)}, status_code=400)

    except Exception as e:
        return APIResponse.error({'error': 'Error updating: ' + str(e)}, status_code=500)


def _update_search_results(search_params, response):
    """Mask every occurrence of the search term in all scrolled hits.

    Walks the scroll started by the caller, builds one bulk 'update' action
    per hit (replacing the term with '*' in ``content``), and submits them
    in a single bulk request. Returns True on success, False on any error.
    """

    def _redaction_op(hit):
        # One bulk 'update' action masking the search term in the document body.
        return {
            '_op_type': 'update',
            '_index': 'conversation',
            '_id': hit['_id'],
            'doc': {
                'content': hit['_source']['content'].replace(search_params["search_term"], '*')
            },
        }

    try:
        # First page comes from the caller's initial search.
        bulk_operations = [_redaction_op(hit) for hit in response['hits']['hits']]

        scroll_id = response['_scroll_id']
        try:
            # Keep scrolling while the previous page was non-empty.
            while scroll_id and response['hits']['hits']:
                response = es_client.scroll(scroll_id=scroll_id, scroll='1m')
                scroll_id = response.get('_scroll_id')
                bulk_operations.extend(_redaction_op(hit) for hit in response['hits']['hits'])
        finally:
            # Release the server-side scroll context instead of letting it
            # expire on its own (the original leaked it).
            if scroll_id:
                es_client.clear_scroll(scroll_id=scroll_id)

        # Apply every update in one bulk request.
        bulk(es_client, bulk_operations)
        return True

    except Exception as e:
        print("Error updating search results:", str(e))
        return False


def get_context_documents(index, current_id, num_context_docs):
    """Return the documents whose numeric ids surround ``current_id``.

    Fetches ids in [current_id - n, current_id + n] (clamped at 1), sorted
    ascending; the window size includes the current document itself.
    """
    center = int(current_id)

    window_query = {
        "query": {
            "range": {
                "id": {
                    "gte": max(1, center - num_context_docs),
                    "lte": center + num_context_docs
                }
            }
        },
        "sort": [{"id": "asc"}],
        # +1 so the window includes the current document.
        "size": num_context_docs * 2 + 1
    }

    response = es_client.search(index=index, body=window_query)
    return [hit["_source"] for hit in response['hits']['hits']]


@search_blue.route('/search/count', methods=['POST'])
@token_required
def count(request_user_id):
    """POST /search/count: bucket matching conversations by teacher
    code / module / degree (top 10 buckets each)."""
    print(request_user_id)
    try:
        data = request.get_json()
        keyword = data.get('keyword', '')
        phrase = data.get('phrase', '')
        talker_name = data.get('talker_name', '')
        module = data.get('module', '')
        degree = data.get('degree', '')
        pause_time = data.get('pause_time', '')

        def _terms_agg(field):
            # Top-10 terms aggregation over a keyword sub-field.
            return {"terms": {"field": field, "size": 10}}

        # Each provided parameter contributes one filter clause.
        filters = []
        if keyword:
            filters.append({"match": {"content": keyword}})
        if talker_name:
            filters.append({"term": {"talker_name": talker_name}})
        if module:
            filters.append({"match": {"teacher_info.module": module}})
        if degree:
            filters.append({"match": {"teacher_info.degree": degree}})
        if phrase:
            filters.append({"match_phrase": {"content": phrase}})
        if pause_time:
            filters.append({"range": {"pause_time": {"gt": pause_time}}})

        query = {
            "query": {"bool": {"filter": filters}},
            "aggs": {
                "by_code": _terms_agg("teacher_info.code.keyword"),
                "by_module": _terms_agg("teacher_info.module.keyword"),
                "by_degree": _terms_agg("teacher_info.degree.keyword"),
            },
        }

        result = es_client.search(index='conversation', body=query)

        # Extract the bucket lists for the response payload.
        response_data = {
            name: result['aggregations'][name]['buckets']
            for name in ("by_code", "by_module", "by_degree")
        }

        return APIResponse.success(response_data)

    except Exception as e:
        return APIResponse.error({"error": str(e)}, 500)


def is_valid_english_word(word):
    """Return True iff *word* is in the module's US-English enchant dictionary."""
    return en_dict.check(word)


# Normalize a raw token for dictionary lookup.
def clean_word(word):
    """Strip every non-ASCII-letter character and lowercase the remainder."""
    letters = "".join(re.findall(r'[a-zA-Z]', word))
    return letters.lower()


# Fetch the synonym list for one coarse part of speech.
def get_synonyms_from_pos(word, pos):
    """Return the unique WordNet lemma names for *word* restricted to *pos*.

    *pos* is one of "NOUN"/"VERB"/"ADJ"/"ADV"; anything else yields [].
    """
    wn_pos = {
        "NOUN": wordnet.NOUN,
        "VERB": wordnet.VERB,
        "ADJ": wordnet.ADJ,
        "ADV": wordnet.ADV
    }.get(pos)

    if wn_pos is None:
        return []

    names = {
        lemma.name()
        for synset in wordnet.synsets(word, pos=wn_pos)
        for lemma in synset.lemmas()
    }
    return list(names)


# Fetch the antonym list for one coarse part of speech.
def get_antonyms_from_pos(word, pos):
    """Return unique antonym lemma names for *word* restricted to *pos*.

    Only the first antonym of each lemma is taken (matching WordNet's
    primary antonym link). Unknown *pos* values yield [].
    """
    wn_pos = {
        "NOUN": wordnet.NOUN,
        "VERB": wordnet.VERB,
        "ADJ": wordnet.ADJ,
        "ADV": wordnet.ADV
    }.get(pos)

    if wn_pos is None:
        return []

    found = set()
    for synset in wordnet.synsets(word, pos=wn_pos):
        for lemma in synset.lemmas():
            opposites = lemma.antonyms()
            if opposites:
                found.add(opposites[0].name())
    return list(found)


# Search the index and assemble word-family / synonym / antonym data.
def search_and_process(keyword):
    """Collect family words, synonyms and antonyms of *keyword* from the index.

    Family words are dictionary-valid tokens in matching documents that start
    with the keyword; synonym/antonym lists are grouped per part of speech.
    Returns an APIResponse payload.
    """
    query = {
        "query": {
            "wildcard": {
                "content": f"{keyword}*"
            }
        }
    }

    try:
        result = es_client.search(index='conversation', body=query, size=100)
        hits = result.get('hits', {}).get('hits', [])

        # Gather same-prefix tokens that pass the spell-checker.
        variations = set()
        for hit in hits:
            for token in hit['_source']['content'].split():
                if not token.startswith(keyword):
                    continue
                cleaned = clean_word(token)
                if cleaned and en_dict.check(cleaned):
                    variations.add(cleaned)

        word_pos_list = get_word_pos(keyword)

        # Per-POS synonym/antonym lists, restricted to words present in the index.
        synonyms_dict = {}
        antonyms_dict = {}
        for word_pos in word_pos_list:
            synonyms_dict[word_pos] = search_with_synonyms(keyword, word_pos)
            antonyms_dict[word_pos] = search_with_antonyms(keyword, word_pos)

        return APIResponse.success({
            "family_words": list(variations),
            "synonyms": synonyms_dict,
            "antonyms": antonyms_dict,
            "word_pos": word_pos_list
        })

    except Exception as e:
        print("Error:", str(e))
        return APIResponse.error({"error": str(e)}, 500)


@search_blue.route('/search/word', methods=['POST'])
@token_required
def find_word_variations(request_user_id):
    """POST /search/word: word family, synonyms and antonyms for a keyword."""
    print(request_user_id)
    payload = request.get_json()
    return search_and_process(payload.get('keyword', ''))


@search_blue.route('/search/classify', methods=['POST'])
@token_required
def classify_word(request_user_id):
    """POST /search/classify: collocation statistics for a keyword."""
    print(request_user_id)
    payload = request.get_json()
    return classify(payload.get('keyword', ''))


def classify(keyword):
    """Build collocation statistics for each part of speech of *keyword*.

    Returns an APIResponse payload mapping POS label -> collocation counts.
    """
    try:
        classify_dict = {
            word_pos: get_collocations_dict(keyword, word_pos)
            for word_pos in get_word_pos(keyword)
        }
        return APIResponse.success(classify_dict)

    except Exception as e:
        print("Error:", str(e))
        return APIResponse.error({"error": str(e)}, 500)


def search_with_antonyms(keyword, pos):
    """Return WordNet antonyms of *keyword* (for *pos*) that occur in the index."""
    matching_antonyms = []

    for candidate in get_antonyms_from_pos(keyword, pos):
        probe = {"query": {"match": {"content": candidate}}}
        try:
            # size=1: we only need to know whether any document matches.
            found = es_client.search(index='conversation', body=probe, size=1)
            if found.get('hits', {}).get('hits', []):
                matching_antonyms.append(candidate)
        except Exception as e:
            print("Error:", str(e))

    return matching_antonyms


def search_with_synonyms(keyword, pos):
    """Return WordNet synonyms of *keyword* (for *pos*) that occur in the index.

    The keyword itself is excluded from the result.
    """
    matching_synonyms = []

    for candidate in get_synonyms_from_pos(keyword, pos):
        probe = {"query": {"match": {"content": candidate}}}
        try:
            # size=1: we only need to know whether any document matches.
            found = es_client.search(index='conversation', body=probe, size=1)
            hits = found.get('hits', {}).get('hits', [])
            if hits and candidate != keyword:
                matching_synonyms.append(candidate)
        except Exception as e:
            print("Error:", str(e))

    return matching_synonyms


def get_word_pos(word):
    """Return the distinct coarse POS labels WordNet knows for *word*.

    Labels are "NOUN"/"VERB"/"ADJ"/"ADV"; ["UNKNOWN"] when WordNet has no
    mappable synset for the word.
    """
    # WordNet tags head adjectives 'a' and satellite adjectives 's'; both
    # are adjectives for our purposes. FIX: the original did not map 's',
    # so satellite-only adjectives were reported as UNKNOWN.
    label_by_tag = {'n': "NOUN", 'v': "VERB", 'a': "ADJ", 's': "ADJ", 'r': "ADV"}

    word_pos_list = [
        label_by_tag[synset.pos()]
        for synset in wordnet.synsets(word)
        if synset.pos() in label_by_tag
    ]

    return list(set(word_pos_list)) if word_pos_list else ["UNKNOWN"]


pos_mapping = {
    "NOUN": 'N',
    "VERB": 'V',
    "ADJ": 'J',
    "ADV": 'R'
}

word_dict = {
    "noun": ["NN", "NNS", "NNP", "NNPS"],
    "adjective": ["JJ", "JJR", "JJS"],
    "verb": ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ"],
    "adverb": ["RB", "RBR", "RBS"]
}

english_tags = {
    "noun": {"NN", "NNS", "NNP", "NNPS"},
    "adjective": {"JJ", "JJR", "JJS"},
    "verb": {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ"},
    "adverb": {"RB", "RBR", "RBS"},
    "pronoun": {"PRP", "PRP$", "WP", "WP$"},
    "preposition": {"IN", "TO"},
    "conjunction": {"CC"},
    "numeral": {"CD"},
    "determiner": {"DT", "PDT", "WDT"},
    "interjection": {"UH"},
    "symbol": {"SYM"},
    "unknown": {"FW", "LS", "POS", "RP", "SYM"},
}


def process_text(text, keyword, keyword_pos, collocations_dict):
    """Count two- and three-word collocations around *keyword* in one document.

    Tokenizes and POS-tags the preprocessed text, finds the first occurrence
    of *keyword*, then looks at the neighbouring word(s): the preceding
    word(s) for noun/adverb keywords, the following word(s) for
    verb/adjective keywords. Counts are accumulated into *collocations_dict*
    (a nested defaultdict(int)) keyed by a pattern label such as
    "adjective + <keyword>".

    NOTE(review): this function is invoked concurrently from a
    ThreadPoolExecutor in get_collocations_dict while mutating the shared
    dict — presumably relying on CPython's GIL; confirm this is acceptable.
    """
    # Tokenize and POS-tag the cleaned text.
    words = word_tokenize(preprocess_text(text))
    pos_tags = pos_tag(words)

    # Locate the first occurrence of the keyword.
    word_index = None
    for index, (word, pos) in enumerate(pos_tags):
        if word == keyword:
            word_index = index
            break

    if word_index is not None:
        keyword_pos_to_use = keyword_pos.lower()[0]  # normalize POS label to its lowercase initial

        # Collocations BEFORE the keyword (noun/adverb keywords).
        if keyword_pos_to_use in ('n', 'r'):
            collocation_index = word_index - 1
            if 0 <= collocation_index < len(pos_tags):
                collocation_word, collocation_pos = pos_tags[collocation_index]
                for key, values in word_dict.items():
                    if collocation_pos in values:
                        found_key = key
                        pos_key = f"{found_key} + {keyword}"
                        collocation_phrase = f"{collocation_word} {keyword}"
                        collocations_dict[pos_key][collocation_phrase] += 1
                    else:
                        # NOTE(review): this else runs once per non-matching tag
                        # group and tests the word two positions back against
                        # that same group, so one sentence can be counted under
                        # several keys; also `key` (the loop variable) is
                        # reassigned below while iterating — confirm intent.
                        second_collocation_index = collocation_index - 1

                        if 0 <= second_collocation_index < len(pos_tags):
                            second_collocation_word, second_collocation_pos = pos_tags[second_collocation_index]

                            if second_collocation_pos in values:
                                found_key = key
                                for second_key, second_values in english_tags.items():
                                    if collocation_pos in second_values:
                                        second_found_key = second_key
                                        key = f"{found_key} + {second_found_key} + {keyword}"
                                # NOTE(review): if no english_tags group matched,
                                # `key` keeps its loop value and the phrase is
                                # filed under a bare group name — confirm intent.
                                collocation_phrase = f"{second_collocation_word} {collocation_word} {keyword}"
                                collocations_dict[key][collocation_phrase] += 1

        # Collocations AFTER the keyword (verb/adjective keywords).
        if keyword_pos_to_use in ('v', 'j'):
            collocation_index = word_index + 1
            if 0 <= collocation_index < len(pos_tags):
                collocation_word, collocation_pos = pos_tags[collocation_index]
                for key, values in word_dict.items():
                    if collocation_pos in values:
                        found_key = key
                        pos_key = f"{keyword} + {found_key}"
                        collocation_phrase = f"{keyword} {collocation_word}"
                        collocations_dict[pos_key][collocation_phrase] += 1

                    else:
                        # NOTE(review): mirrors the branch above, with the same
                        # multiple-counting / `key` reassignment caveats.
                        second_collocation_index = collocation_index + 1

                        if 0 <= second_collocation_index < len(pos_tags):
                            second_collocation_word, second_collocation_pos = pos_tags[second_collocation_index]

                            if second_collocation_pos in values:
                                found_key = key
                                for second_key, second_values in english_tags.items():
                                    if collocation_pos in second_values:
                                        second_found_key = second_key
                                        key = f"{keyword} + {second_found_key} + {found_key}"
                                collocation_phrase = f"{keyword} {collocation_word} {second_collocation_word}"
                                collocations_dict[key][collocation_phrase] += 1


def get_collocations_dict(keyword, keyword_pos):
    """Collect collocation counts for *keyword* across all matching documents.

    Scrolls through every page of hits for a content match and feeds each
    document's text to :func:`process_text`, which accumulates counts into
    the shared nested counter that is returned.

    BUG FIX: the original loop read ``initial_search_result`` on every
    iteration, so the first page was re-processed once per scroll page and
    the later pages were never processed at all.
    """
    query = {
        "query": {
            "match": {
                "content": keyword
            }
        },
        "size": 1000
    }

    collocations_dict = defaultdict(lambda: defaultdict(int))

    with ThreadPoolExecutor(max_workers=8) as executor:
        response = es_client.search(index='conversation', body=query, scroll="1m")
        scroll_id = response["_scroll_id"]

        while response["hits"]["hits"]:
            texts = [hit["_source"]["content"] for hit in response["hits"]["hits"]]

            # list(...) drains the lazy Executor.map so worker exceptions
            # surface here instead of being silently discarded.
            list(executor.map(
                lambda text: process_text(text, keyword, keyword_pos, collocations_dict),
                texts))

            # Advance to the next scroll page; stop when a page comes back empty.
            response = es_client.scroll(scroll_id=scroll_id, scroll="1m")
            scroll_id = response.get("_scroll_id", scroll_id)

    return collocations_dict


def preprocess_text(text):
    """Strip non-letters, lowercase, and re-join the NLTK tokens with spaces."""
    # Replace anything that is not an ASCII letter with a space, then lowercase.
    letters_only = re.sub(r'[^a-zA-Z]', ' ', text).lower()

    # Tokenize and normalize whitespace to single spaces.
    return " ".join(nltk.word_tokenize(letters_only))
