import csv

import jieba
import numpy as np
import pypinyin
from elasticsearch7 import Elasticsearch
from opencc import OpenCC
from sentence_transformers import SentenceTransformer

# ---------------------- Global configuration ----------------------
MODEL_TEXT_DIMS = 768  # Sentence-BERT output dimension (fixed for this model)
INDEX = "trademarks"  # Elasticsearch index name used by index_text()

# ---------------------- Text preprocessing setup ----------------------
cc = OpenCC('t2s')  # Traditional -> Simplified Chinese converter
jieba.add_word("商标专用词")  # custom dictionary entry so jieba does not mis-segment it

def preprocess_text(text):
    """Normalize a trademark name for indexing.

    Converts Traditional to Simplified Chinese, segments the text with
    jieba, and derives a pinyin transliteration (one pinyin token per
    segmented word).

    Args:
        text: Raw trademark name, possibly in Traditional Chinese.

    Returns:
        tuple[str, str]: (space-joined segmented words, space-joined pinyin).
    """
    simplified = cc.convert(text)
    tokens = jieba.lcut(simplified, cut_all=False)
    pinyin_parts = [''.join(pypinyin.lazy_pinyin(tok)) for tok in tokens]
    return ' '.join(tokens), ' '.join(pinyin_parts)

# ---------------------- Model and database initialization ----------------------
# NOTE(review): both lines run at import time — the model download/load and the
# ES connection are side effects; presumably intentional for a one-shot script,
# but confirm before importing this module elsewhere.
text_model = SentenceTransformer('hfl/chinese-sentence-bert-wwm-ext')
es = Elasticsearch(["http://localhost:9200"])

# ---------------------- 特征提取与入库 ----------------------
def extract_text_feat(text):
    """Compute an L2-normalized sentence embedding and pinyin for *text*.

    Args:
        text: Raw trademark name (may contain Traditional Chinese).

    Returns:
        tuple: (unit-length numpy.ndarray embedding, pinyin string).
    """
    processed_text, pinyin = preprocess_text(text)
    feat = text_model.encode(processed_text)
    # Bug fix: the original used np.linalg.norm without numpy being imported
    # anywhere in the file (NameError at runtime).
    norm = np.linalg.norm(feat)
    # Guard against a zero vector so we never divide by zero (NaN output).
    return (feat / norm if norm > 0 else feat), pinyin

def index_text(trademark_id, name):
    """Upsert one trademark document (name, pinyin, embedding) into ES.

    Errors are reported to stdout instead of being raised, so a single bad
    record does not abort a batch run.

    Args:
        trademark_id: Document id used for the Elasticsearch upsert.
        name: Trademark name to embed and index.
    """
    try:
        vector, pinyin = extract_text_feat(name)
        doc_body = {
            "doc": {
                "name": name,
                "name_pinyin": pinyin,
                "text_vector": vector.tolist(),
            },
            "doc_as_upsert": True,  # create the document if it does not exist
        }
        es.update(index=INDEX, id=trademark_id, body=doc_body)
        print(f"Indexed text: {trademark_id}")
    except Exception as e:
        print(f"Error: {trademark_id} - {str(e)}")

# ---------------------- 批量入库示例 ----------------------
if __name__ == "__main__":
    # Bulk-index every row of the trademark CSV.
    # Assumes columns 'trademark_id' and 'name' — verify against the file.
    csv_path = "./trademarks.csv"
    with open(csv_path, 'r', encoding='utf-8') as f:
        for record in csv.DictReader(f):
            index_text(record['trademark_id'], record['name'])