import pymysql
from elasticsearch import Elasticsearch, helpers
from transformers import BertTokenizer, BertModel
import torch
from tqdm import tqdm

# Supervised SimCSE Chinese sentence-embedding model used to vectorize questions.
simcse_sup_path = "hellonlp/simcse-roberta-base-zh"
tokenizer = BertTokenizer.from_pretrained(simcse_sup_path)
model = BertModel.from_pretrained(simcse_sup_path)

# MySQL connection settings for the source database.
# NOTE(review): credentials are hard-coded — consider env vars or a secrets store.
mysql_config = {
    'host': '192.168.3.5',
    'port': 3306,
    'user': 'root',
    'password': '123456',
    'database': 'safe'
}

# Elasticsearch cluster the documents are synced into.
es = Elasticsearch([
    {
        'host': '114.55.210.76',
        'port': 20101,
        'scheme': 'http'  # use http or https depending on the cluster setup
    }
])

# Name of the target Elasticsearch index.
target_index = 'es_ob_file_vector_6_14'

# Mapping for the target index: text fields each carry a "keyword" sub-field
# for exact-match/aggregation, plus a dense vector for similarity search.
mapping = {

    "mappings": {
        "properties": {
            # Question text (the field that gets embedded).
            "ask": {
                "type": "text",
                "fields": {
                    "keyword": {
                        "type": "keyword",
                        "ignore_above": 256
                    }
                }
            },
            # Free-text description of the record.
            "description": {
                "type": "text",
                "fields": {
                    "keyword": {
                        "type": "keyword",
                        "ignore_above": 256
                    }
                }
            },
            # Category identifier copied from MySQL's detection_type column.
            "detectionType": {
                "type": "text",
                "fields": {
                    "keyword": {
                        "type": "keyword",
                        "ignore_above": 256
                    }
                }
            },
            # SimCSE [CLS] embedding; 768 dims matches a BERT-base hidden size.
            # NOTE(review): on ES < 8.5, dense_vector with "similarity" also
            # requires "index": true — confirm the cluster version.
            "question_vector": {
                "type": "dense_vector",
                "dims": 768,
                "similarity": "cosine"
            }
        }
    }
}


# Ensure the target index exists.
def create_index():
    """Create the target index with its mapping; no-op if it already exists."""
    if es.indices.exists(index=target_index):
        return
    es.indices.create(index=target_index, body=mapping)


# Pull the source rows out of MySQL.
def read_from_mysql():
    """Fetch the rows to be indexed and return them as a tuple of dicts."""
    conn = pymysql.connect(**mysql_config)
    try:
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(
                "select description, ask, detection_type from ob_safe_file where detection_type in ( with recursive ob_safe_category_tree as (select *  from ob_safe_file_category where dict_label = '现场检查' and parent_dict_value = 1 union all select p.* from ob_safe_category_tree t join ob_safe_file_category p on t.dict_value = p.parent_dict_value) select dict_value from ob_safe_category_tree where level = 3)"
            )
            records = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        conn.close()
    return records


def get_vector_simcse(sentence):
    """Embed *sentence* and return its [CLS] hidden state as a numpy array."""
    # Tokenize (truncated to the model's 512-token limit) and add a batch dim.
    token_ids = tokenizer.encode(sentence, max_length=512, truncation=True)
    batch = torch.tensor(token_ids).unsqueeze(0)
    # Inference only — no gradients needed.
    with torch.no_grad():
        hidden = model(batch).last_hidden_state
    # Position 0 is the [CLS] token; drop the batch dimension.
    return hidden[:, 0].squeeze(0).numpy()


# Bulk-write prepared action dicts into Elasticsearch.
def write_to_target_index(docs, batch_size=10):
    """Index *docs* into Elasticsearch in batches, with a progress bar.

    Parameters
    ----------
    docs : list
        Bulk action dicts (each carrying ``_index`` and ``_source``).
    batch_size : int, optional
        Documents per ``helpers.bulk`` call. Defaults to 10, preserving the
        previously hard-coded chunk size.
    """
    with tqdm(total=len(docs)) as pbar:
        for start in range(0, len(docs), batch_size):
            # Slicing already clamps to len(docs); no explicit min() needed.
            batch = docs[start:start + batch_size]
            helpers.bulk(es, batch)
            pbar.update(len(batch))


# End-to-end sync: MySQL rows -> SimCSE embeddings -> Elasticsearch.
def sync_from_mysql_to_es():
    """Read rows from MySQL, embed each question, and bulk-index the docs."""
    rows = read_from_mysql()
    actions = []
    for row in rows:
        # .tolist(): the Elasticsearch JSON serializer cannot encode numpy
        # arrays, so convert the embedding to a plain list of floats.
        question_embedding = get_vector_simcse(row["ask"]).tolist()
        action = {
            "_index": target_index,
            # "_type" removed: mapping types were dropped in ES 7 and the
            # 8.x client rejects it in bulk actions.
            "_source": {
                "description": row["description"],
                "ask": row["ask"],
                "detectionType": row["detection_type"],
                "question_vector": question_embedding
            }
        }
        actions.append(action)

        # Flush periodically to bound memory usage.
        if len(actions) >= 500:  # batch size — tune as needed
            write_to_target_index(actions)
            actions.clear()

    # Flush any remaining documents.
    if actions:
        write_to_target_index(actions)


# Create the index (if needed) and run the sync — only when executed as a
# script, so importing this module no longer triggers network I/O.
if __name__ == "__main__":
    create_index()
    sync_from_mysql_to_es()
