import json
import time
import threading
from flask import Flask, request, jsonify
from synonyms import synonyms
from flask_cors import CORS
import pika
import redis
import concurrent.futures

from select_query_class import elasticsearch


# RabbitMQ credentials for the Aliyun AMQP serverless endpoint.
# NOTE(review): credentials are hard-coded in source — move them to environment
# variables or a secrets store before deployment.
RABBITMQ_USERNAME = 'MjpyYWJiaXRtcS1zZXJ2ZXJsZXNzLWNuLXVxbTNrYnA3dDA4OkxUQUk1dEc5WkNBdTJEQnF5UWQ4S01oaQ=='
RABBITMQ_PASSWORD = 'MDgyQUIxNzM0NzQyQzEwNDNGNTcyOEMxRTcyRkZFNTkwRThGOEIyNjoxNzA1MDQ2Mjk4MTY3'
RABBITMQ_HOST = 'rabbitmq-serverless-cn-uqm3kbp7t08.cn-hangzhou.amqp-14.net.mq.amqp.aliyuncs.com'


# Create the Redis connection used to hand results back to the API layer.
redis_host = 'localhost'  # Redis server address
redis_port = 6379          # Redis server port
redis_db = 0              # Redis database number
redis_password = None     # Redis password (if any)
redis_connection = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_db, password=redis_password)
expiration_time = 900  # TTL for cached search results: 15 minutes

# Number of hits returned per result page.
page_size = 20


# Elasticsearch client wrapper (project-local class) bound to the "policy" index.
es = elasticsearch(index_name="policy")
# NOTE(review): this only checks that the wrapper object is truthy, not that the
# cluster is actually reachable — confirm whether an explicit ping is needed.
if es:
    print("Elasticsearch 连接成功!")
else:
    print("Elasticsearch 连接失败!")

# connect queue
def get_rabbitmq_connection(username, password, host):
    """Open and return a blocking RabbitMQ connection for the given host.

    :username: plain-text AMQP user name
    :password: plain-text AMQP password
    :host: broker hostname
    :return: an open pika.BlockingConnection
    """
    return pika.BlockingConnection(
        pika.ConnectionParameters(
            host=host,
            credentials=pika.PlainCredentials(username, password),
        )
    )

def send_to_queue(task_id, query, level, province, city, source, tag, page, starttime, endtime):
    """Publish one search task onto the '_policy' queue as a JSON message.

    :task_id: uuid the consumer will later use as the Redis result key
    :return: None
    """
    message = json.dumps({
        "task_id": task_id,
        "query": query,
        "level": level,
        "province": province,
        "city": city,
        "source": source,
        "tag": tag,
        "page": page,
        "starttime": starttime,
        "endtime": endtime,
    })

    connection = get_rabbitmq_connection(RABBITMQ_USERNAME, RABBITMQ_PASSWORD, RABBITMQ_HOST)
    # BUGFIX: close the connection even when declare/publish raises, so a failed
    # publish does not leak the AMQP connection.
    try:
        channel = connection.channel()
        channel.queue_declare(queue='_policy')
        channel.basic_publish(exchange='', routing_key='_policy', body=message)
    finally:
        connection.close()


# consume queue
def callback(task_id, query, level, province, city, source, tag, page, starttime, endtime):
    """Run a policy search (raw query plus high-similarity synonyms), page the
    deduplicated results, and store the response JSON in Redis under task_id.

    :task_id: uuid used as the Redis key (entry expires after expiration_time)
    :page: 1-based page number; page_size hits per page
    :return: None — the result (or a "Page out of range" error payload) is
             written to Redis as the side effect.
    """
    start = time.time()

    # Primary search on the raw query string.
    data = es.search(query, level, province, city, source, tag, page, starttime, endtime)
    merged_hits = list(data["hits"]["hits"])

    # Segment the query and expand each token with its near-synonyms.
    for word in synonyms.seg(query)[0]:
        words, scores = synonyms.nearby(word)
        # Only expand when the token has a strong near-synonym: scores[0] is the
        # word itself, scores[1] the closest other word (threshold 0.85).
        if words and len(scores) > 1 and scores[1] > 0.85:
            keep = sum(s > 0.85 for s in scores)
            # BUGFIX: the original sliced the (words, scores) tuple itself
            # (nearby[:count]), which never truncated the inner lists; slice
            # each list so only the strong matches are actually searched.
            for syn_word, syn_score in zip(words[:keep], scores[:keep]):
                # Synonym hits have their _score down-weighted by similarity.
                merged_hits.extend(
                    seg_query(syn_word, level, province, city, source, tag,
                              page, starttime, endtime, syn_score)
                )

    # Deduplicate by document id; the total count drives paging.
    json_list = del_repeat(merged_hits)
    count = len(json_list)

    end = time.time()  # search duration available as (end - start) for debugging

    # Reject pages past the end: the start index must fall within the result set.
    # (Equivalent to the original `page * 20 >= count + 20`, but using page_size.)
    if (page - 1) * page_size >= count:
        response_data = {
            "error_msg": "Page out of range",
            "uuid": task_id,
            "count": count
        }
        json_data = json.dumps(response_data, ensure_ascii=False)
        redis_connection.setex(task_id, expiration_time, json_data)
        return

    # Slice out the requested page of _source documents.
    start_index = (page - 1) * page_size
    end_index = min(page * page_size, count)
    result = [item["_source"] for item in json_list[start_index:end_index]]

    response_data = {
        "uuid": task_id,
        "data": result,
        "count": count
    }
    json_data = json.dumps(response_data, ensure_ascii=False)

    redis_connection.setex(task_id, expiration_time, json_data)
    # BUGFIX: dropped the trailing ch.basic_ack(...) — `ch` and `method` are
    # undefined in this scope (they only exist in the consumer) and raised
    # NameError on every successful run; acking is the consumer's job.


def consume_from_queue():
    """Blocking consumer loop: pull search tasks from '_policy' and run callback().

    Fixes two defects in the original implementation:
    - each message was acknowledged twice (once from the future's done-callback
      and once unconditionally after the with-block), an AMQP protocol error;
    - a ThreadPoolExecutor was created per message and immediately joined by its
      `with` block, so the "async" submit was effectively synchronous anyway,
      while the ack ran on a worker thread even though pika channels are not
      thread-safe.
    The task now runs inline on pika's dispatch thread and is acked exactly once.
    """
    connection = get_rabbitmq_connection(RABBITMQ_USERNAME, RABBITMQ_PASSWORD, RABBITMQ_HOST)
    channel = connection.channel()

    channel.queue_declare(queue='_policy')

    channel.basic_qos(prefetch_count=100)

    def on_message_callback(ch, method, properties, body):
        # Parse the task message produced by send_to_queue().
        data = json.loads(body)
        starttime = data["starttime"]
        endtime = data["endtime"]
        try:
            callback(
                data['task_id'], data['query'], data['level'], data['province'],
                data['city'], data['source'], data["tag"], data["page"],
                "" if starttime is None else starttime,
                "" if endtime is None else endtime,
            )
        finally:
            # Ack exactly once — even if the search failed — so a bad message
            # cannot be redelivered forever.
            ch.basic_ack(delivery_tag=method.delivery_tag)

    channel.basic_consume(queue='_policy', on_message_callback=on_message_callback)

    print("Waitting for messages...")
    channel.start_consuming()


def del_repeat(data):
    """Deduplicate ES hits by their `_source.id`, keeping first-occurrence order.

    :data: list of Elasticsearch hit dicts (each with a "_source" dict holding "id")
    :return: new list with only the first hit for each distinct id

    Improvement: membership is tracked in a set (O(1) lookup) instead of a list
    (O(n) lookup), turning the accidental O(n^2) scan into O(n).
    """
    seen_ids = set()  # ids encountered so far (assumes ids are hashable scalars)
    unique_hits = []
    for hit in data:
        hit_id = hit["_source"]["id"]
        if hit_id not in seen_ids:
            seen_ids.add(hit_id)
            unique_hits.append(hit)
    return unique_hits


def seg_query(key, level, province, city, source, tag, page, starttime, endtime, value):
    """Search ES for a synonym term and down-weight each hit by its similarity.

    :key: synonym term to search for
    :value: similarity weight applied multiplicatively to each hit's _score
    :return: list of ES hit dicts with "_score" rescaled (stored back as str)
    """
    response = es.search(key, level, province, city, source, tag, page, starttime, endtime)
    hits = response["hits"]["hits"]
    for hit in hits:
        weighted = float(hit["_score"]) * value
        hit["_score"] = str(weighted)
    return hits


def get_date_content_by_id(query_id):
    """Look up policy documents by id and project the display fields.

    :query_id: document id passed to the ES wrapper's search_by_id
    :return: list of dicts with province, pubDate, short_by_content and city
    """
    hits = es.search_by_id(query_id)["hits"]["hits"]
    fields = ("province", "pubDate", "short_by_content", "city")
    return [{name: hit["_source"][name] for name in fields} for hit in hits]

# Start the RabbitMQ consumer in the background as soon as this module is imported.
consumer_thread = threading.Thread(target=consume_from_queue)
consumer_thread.daemon = True  # daemon thread: does not block interpreter exit
consumer_thread.start()



