import numpy as np
import pandas as pd
import json  
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# If the import below fails with a protobuf error, run in the terminal:
# export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.metrics import AUC 
import warnings

import sys
sys.path.append("/opt/wyh/LSP_book_rec/DSSM")


from models.dssm import DSSM
from processing.feature_column import SparseFeat, get_feature_names, VarLenSparseFeat, DenseFeat
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from utils import Negative_Sample
import os
from tqdm import tqdm
import multiprocessing
import pickle
import pymysql
from contextlib import closing
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
os.environ['CUDA_VISIBLE_DEVICES'] = '7'  
pd.set_option('display.max_colwidth', None)  # None 表示不限制列宽
pd.set_option('display.max_columns', None)
import sys, datetime
from typing import Tuple, List, Dict, Any
sys.path.append("../")

from logs.log import setup_custom_logger
# 日志系统初始化
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
LOG_DIR = '../Logfiles'
file_name = str(datetime.datetime.now().strftime('%Y-%m-%d'))
my_logger = setup_custom_logger(os.path.join(LOG_DIR, '%s.log' % file_name), log_level="INFO")
# 设置GPU设备（若有需要）
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# pd.set_option('display.max_colwidth', None)
# pd.set_option('display.max_columns', None)



def _load_milvus_config():
    # 2. milvus数据库配置
    with open("../global_configs/milvus_db_config.json", 'r') as file:
        milvus_db_config = json.load(file)
    return milvus_db_config


class RecEmbeddingUpdater(object):
    """
    Recompute user/item embeddings with the trained two-tower (DSSM) model and
    write them into Milvus. One instance serves one data group; several groups
    can be processed concurrently by separate instances/processes.
    """
    def __init__(self, db_config: Dict[str, Any], group_name: str):
        """
        :param db_config: MySQL connection parameters (host, user, passwd, port, db, charset)
        :param group_name: data group name; selects the group's config file and
                           prefixes the group's MySQL tables / Milvus collections
        """
        self.db_config = db_config
        self.group_name = group_name
        my_logger.info(f"初始化{self.group_name}成功， 准备开始加载模型并更新embedding...")

    @staticmethod
    def encode_df(data_config: Dict[str, Any], df: pd.DataFrame, data_type: str = 'user_data') -> Tuple[pd.DataFrame, List[str]]:
        """
        Encode a DataFrame's features in place:
          - sparse (Id) features via pre-fitted LabelEncoders
          - dense features via a pre-fitted MinMaxScaler

        :param data_config: group config; features listed under data_config['data_config'][data_type]
        :param df: frame to encode (mutated in place)
        :param data_type: 'user_data' or 'item_data'
        :returns: (encoded df, model-input feature names including the var-len
                  feature name); an unknown data_type logs an error and
                  returns (df, []).
        """
        # Fix: validate data_type up front. The original only checked it at the
        # end, so an unknown value crashed with UnboundLocalError on `mms`
        # before the error branch could run.
        if data_type not in ('user_data', 'item_data'):
            my_logger.error("data_type error!!!")
            return df, []

        sparse_features = []
        dense_features = []
        for feature in data_config['data_config'][data_type]:
            if feature["feature_type"] == "IdFeature":
                sparse_features.append(feature["input_name"])
            elif feature["feature_type"] == "DenseFeature":
                dense_features.append(feature["input_name"])
        my_logger.info("start to encoding...")

        # Load the transformers fitted at training time.
        sparse_path = os.path.join(data_config['feature_transformer_dir'], 'sparse_labelencoder.pkl')
        dense_path = os.path.join(data_config['feature_transformer_dir'], 'dense_minmaxscaler.pkl')
        with open(sparse_path, 'rb') as f:
            sparse_labelencoder = pickle.load(f)
        with open(dense_path, 'rb') as f:
            dense_minmaxscaler = pickle.load(f)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Sparse features: values unseen at training time are mapped to -2
            # (assumed to be the OOV bucket present in the fitted encoder's
            # classes_ -- TODO confirm against the training pipeline).
            for feat in sparse_features:
                lbe: LabelEncoder = sparse_labelencoder[feat]
                is_known = df[feat].isin(lbe.classes_)
                df.loc[~is_known, feat] = -2
                df[feat] = lbe.transform(df[feat])
            # Dense features: min-max scale with the training-time scaler.
            scaler_key = "user_dense_features" if data_type == 'user_data' else "item_dense_features"
            mms = dense_minmaxscaler[scaler_key]
            if dense_features:
                df[dense_features] = mms.transform(df[dense_features])
        my_logger.info("encoding success!!!")
        var_len_feature = "user_hist" if data_type == 'user_data' else "tag"
        return df, sparse_features + dense_features + [var_len_feature]

    @staticmethod
    def get_var_feature(data: pd.DataFrame, col: str) -> Tuple[Dict[str, int], np.ndarray, int]:
        """
        Process a '|'-separated variable-length feature: build a fresh
        key->index mapping (indices start at 1; 0 is left for padding) and pad
        every sequence to the longest one found in data[col].
        """
        key2index = {}

        def split(x: str) -> List[int]:
            keys = x.split('|')
            for key in keys:
                if key not in key2index:
                    key2index[key] = len(key2index) + 1
            return [key2index[k] for k in keys]

        var_feature = list(map(split, data[col].values))
        max_len = max(len(v) for v in var_feature)
        var_feature_padded = pad_sequences(var_feature, maxlen=max_len, padding='post')
        return key2index, var_feature_padded, max_len

    @staticmethod
    def get_test_var_feature(data: pd.DataFrame, col: str, key2index: Dict[str, int], max_len: int) -> Tuple[np.ndarray, Dict[str, int]]:
        """
        Encode a variable-length feature with an existing key2index mapping,
        extending the mapping for unseen keys, and pad/truncate to max_len.
        NOTE: key2index is mutated in place and also returned.
        """
        def split(x: str) -> List[int]:
            keys = x.split('|')
            for key in keys:
                if key not in key2index:
                    key2index[key] = len(key2index) + 1
            return [key2index[k] for k in keys]

        test_feature = list(map(split, data[col].values))
        test_feature_padded = pad_sequences(test_feature, maxlen=max_len, padding='post')
        return test_feature_padded, key2index

    def get_user_df(self, conn: pymysql.connections.Connection, data_config: Dict[str, Any], user_maxlen: int = 50) -> Tuple[pd.DataFrame, List[str], np.ndarray]:
        """
        Fetch user data from MySQL, persist each user's embedding row index,
        trim/pad user_hist and run feature encoding.

        :returns: (encoded user df, feature names, padded user_hist matrix)
        """
        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
        keys_to_fetch.append('user_hist')
        # NOTE(security): table/column names are interpolated from config and
        # group_name via f-strings; these must never come from untrusted input.
        sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {self.group_name}_all_user_data"
        # Fix: the cursor was never closed in the original.
        with closing(conn.cursor()) as cursor:
            cursor.execute(sql_query)
            user_df = pd.DataFrame(cursor.fetchall(), columns=keys_to_fetch)

            # Make sure the embedding_index column exists.
            cursor.execute(f"SHOW COLUMNS FROM {self.group_name}_all_user_data LIKE 'embedding_index';")
            if not cursor.fetchone():
                cursor.execute(f"ALTER TABLE {self.group_name}_all_user_data ADD COLUMN embedding_index INT AFTER UPDATE_DATE")
            user_ids = user_df['USER_ID'].tolist()
            # Row i of the embedding matrix belongs to user_ids[i]; persist
            # that mapping. executemany replaces the original one-round-trip-
            # per-user loop.
            sql = f"UPDATE {self.group_name}_all_user_data SET embedding_index = %s WHERE USER_ID = %s"
            cursor.executemany(sql, list(enumerate(user_ids)))
            conn.commit()
        print("user_id对应的索引数据更新成功")

        # Keep only the most recent `user_maxlen` behaviour records.
        user_df['user_hist'] = user_df['user_hist'].str.strip('|').str.split('|').str[-user_maxlen:].str.join('|')

        # Load (or start) the persisted user key2index mapping and update it.
        user_key2index_dir = data_config['user_key2index']
        os.makedirs(user_key2index_dir, exist_ok=True)
        key2index_path = os.path.join(user_key2index_dir, 'user_key2index.json')
        if os.path.exists(key2index_path):
            with open(key2index_path, 'r') as f:
                user_key2index = json.load(f)
        else:
            user_key2index = {}
        new_user_hist, user_key2index = self.get_test_var_feature(user_df, 'user_hist', user_key2index, user_maxlen)
        with open(key2index_path, 'w') as f:
            json.dump(user_key2index, f)

        # Encode the user features.
        user_df, all_features = self.encode_df(data_config, user_df, 'user_data')
        return user_df, all_features, new_user_hist

    def get_item_df(self, conn: pymysql.connections.Connection, data_config: Dict[str, Any], item_maxlen: int = 3) -> Tuple[pd.DataFrame, List[str], np.ndarray, List[Any]]:
        """
        Fetch item data from MySQL, rebuild the tag->record_id mapping
        (CSV file + MySQL table), update the persisted tag key2index mapping
        and run feature encoding.

        :returns: (encoded item df, feature names, padded tag matrix, ITEM_ID list)
        """
        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
        sql_query = f"SELECT * FROM {self.group_name}_all_item_data"
        all_item_df = pd.read_sql_query(sql_query, conn)
        item_id_list = all_item_df['ITEM_ID'].tolist()
        item_df = all_item_df[keys_to_fetch]

        # Build tag -> record_id mapping and persist it to CSV and MySQL.
        temp_df = all_item_df[['RECORD_ID', 'tag']].drop_duplicates()
        temp_df['RECORD_ID'] = temp_df['RECORD_ID'].astype(int)
        temp_df['tag'] = temp_df['tag'].astype(str).apply(lambda x: x.split('|'))
        tag_to_record_ids = {}
        for _, row in temp_df.iterrows():
            for tag in row['tag']:
                tag_to_record_ids.setdefault(tag, []).append(row['RECORD_ID'])
        tag_df = pd.DataFrame(list(tag_to_record_ids.items()), columns=['tag', 'RECORD_ID'])
        tag_df['RECORD_ID'] = tag_df['RECORD_ID'].apply(lambda x: '|'.join(map(str, x)))
        tag_df.to_csv(data_config['tag2recordids_path'], index=False)

        # The mapping table is rebuilt from scratch on every refresh.
        with conn.cursor() as cursor:
            cursor.execute(f"DROP TABLE IF EXISTS {self.group_name}_tag2recordId")
            cursor.execute(f"""
                CREATE TABLE {self.group_name}_tag2recordId (
                    tag VARCHAR(255),
                    idList TEXT
                )
            """)
            conn.commit()
            insert_query = f"INSERT INTO {self.group_name}_tag2recordId (tag, idList) VALUES (%s, %s)"
            cursor.executemany(insert_query, tag_df.itertuples(index=False))
            print(f"Table {self.group_name}_tag2recordId created and data inserted")
            conn.commit()

        # Load (or start) the persisted tag key2index mapping and update it.
        tag_key2index_dir = data_config['tag_key2index']
        os.makedirs(tag_key2index_dir, exist_ok=True)
        tag_key2index_path = os.path.join(tag_key2index_dir, 'tag_key2index.json')
        if os.path.exists(tag_key2index_path):
            with open(tag_key2index_path, 'r') as f:
                tag_key2index = json.load(f)
        else:
            tag_key2index = {}
        new_tag, tag_key2index = self.get_test_var_feature(item_df, 'tag', tag_key2index, item_maxlen)
        with open(tag_key2index_path, 'w') as f:
            json.dump(tag_key2index, f)

        # Encode the item features.
        item_df, all_features = self.encode_df(data_config, item_df, 'item_data')
        return item_df, all_features, new_tag, item_id_list

    @staticmethod
    def load_model_save_path(data_config: Dict[str, Any]):
        """Load the trained Keras model from data_config['model_dir']."""
        print("加载模型......")
        model = load_model(data_config['model_dir'])
        print("加载完毕......")
        return model

    @staticmethod
    def create_collection(collection_name: str, collection_type: str = "USER_ID"):
        """
        Create a Milvus collection with an INT64 primary-key field named
        `collection_type` and a 128-dim FLOAT_VECTOR field "embedding".
        """
        # pymilvus schema types are imported at module level; the original's
        # redundant local import was removed.
        id_field = FieldSchema(name=collection_type, dtype=DataType.INT64, is_primary=True, auto_id=False)
        embedding_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=128)
        schema = CollectionSchema(fields=[id_field, embedding_field], description="Collection for item embeddings")
        collection = Collection(name=collection_name, schema=schema)
        print(f"Create collection {collection_name} successfully")
        return collection

    @staticmethod
    def write_to_milvus(collection_name: str, entities: List[Any], collection_type: str = "USER_ID"):
        """
        Insert `entities` ([ids, embeddings]) into the named Milvus collection,
        creating the collection first when it does not exist, then flush and
        (re)create an IVF_FLAT/L2 index on the embedding field.
        """
        # pymilvus is imported at module level; no local re-import needed.
        milvus_config = _load_milvus_config()
        connections.connect(
            alias="default",
            host=milvus_config["MILVUS_HOST"],
            port=milvus_config["MILVUS_PORT"],
            user=milvus_config["USERNAME"],
            password=milvus_config["PASSWORD"],
        )
        if not utility.has_collection(collection_name):
            RecEmbeddingUpdater.create_collection(collection_name, collection_type)
        collection = Collection(name=collection_name)
        collection.insert(entities)
        collection.flush()
        # NOTE(review): connect/flush/create_index are re-issued for every
        # batch by the callers -- confirm whether a single index build after
        # all inserts was intended.
        index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "L2",
            "params": {"nlist": 128},
        }
        collection.create_index(field_name="embedding", index_params=index_params)

    @staticmethod
    def update_user_embedding(model, data_config: Dict[str, Any], user_df: pd.DataFrame,
                              all_features: List[str], new_user_hist: np.ndarray) -> np.ndarray:
        """
        Run the user tower and return the user embedding matrix
        (one row per row of user_df).
        """
        # NOTE(review): this assumes Keras input tensor names equal the feature
        # names; some TF versions append a ":0" suffix -- confirm on upgrade.
        user_model_input = [tensor for tensor in model.input if tensor.name in all_features]
        user_tower = Model(inputs=user_model_input, outputs=model.get_layer("user_embedding").output)
        user_input = {name: user_df[name] for name in all_features}
        # The padded index matrix replaces the raw '|'-separated strings.
        user_input["user_hist"] = new_user_hist
        user_embedding = user_tower.predict(user_input)
        print("user_embedding shape: ", user_embedding.shape)
        return user_embedding

    @staticmethod
    def update_item_embedding(model, data_config: Dict[str, Any], item_df: pd.DataFrame,
                              all_features: List[str], new_tag: np.ndarray) -> np.ndarray:
        """
        Run the item tower and return the item embedding matrix
        (one row per row of item_df).
        """
        # NOTE(review): same tensor-name-matching assumption as the user tower.
        item_model_input = [tensor for tensor in model.input if tensor.name in all_features]
        item_tower = Model(inputs=item_model_input, outputs=model.get_layer("item_embedding").output)
        item_input = {name: item_df[name] for name in all_features}
        # The padded index matrix replaces the raw '|'-separated strings.
        item_input["tag"] = new_tag
        item_embedding = item_tower.predict(item_input)
        print("item_embedding shape: ", item_embedding.shape)
        return item_embedding

    @staticmethod
    def model_predict(data_config: Dict[str, Any], df: pd.DataFrame, all_features: List[str],
                      new_hist: np.ndarray, data_type: str = 'user_data', collection_name: str = 'test',
                      item_id_list: List[Any] = None) -> None:
        """
        Compute embeddings for `df` according to data_type and write them to
        Milvus in batches.

        :param new_hist: padded var-len feature matrix (user_hist or tag)
        :param item_id_list: ITEM_ID keys; required when data_type == 'item_data'
        """
        print("加载模型......")
        model = load_model(data_config['model_dir'])
        print("加载完毕......")
        if data_type == 'user_data':
            user_embedding = RecEmbeddingUpdater.update_user_embedding(model, data_config, df, all_features, new_hist)
            # Milvus keys are the embedding row indices (the same values
            # get_user_df stores in embedding_index), not the real USER_IDs.
            user_ids = list(range(len(df['USER_ID'].tolist())))
            batch_size = 2000
            collection_name += "_user"
            for i in tqdm(range(0, len(user_ids), batch_size), desc="用户数据写入Milvus", unit="batch"):
                batch_entities = [user_ids[i:i + batch_size], user_embedding[i:i + batch_size]]
                try:
                    RecEmbeddingUpdater.write_to_milvus(collection_name, batch_entities, collection_type="USER_ID")
                except Exception as e:
                    print(f"Error inserting batch {i // batch_size + 1}: {e}")
                    break
            print("用户embedding更新完毕......")
        elif data_type == 'item_data':
            print("item ids", item_id_list[:20])
            item_embedding = RecEmbeddingUpdater.update_item_embedding(model, data_config, df, all_features, new_hist)
            batch_size = 20000
            collection_name += "_item"
            for i in tqdm(range(0, len(item_id_list), batch_size), desc="物品数据写入Milvus", unit="batch"):
                batch_entities = [item_id_list[i:i + batch_size], item_embedding[i:i + batch_size]]
                try:
                    RecEmbeddingUpdater.write_to_milvus(collection_name, batch_entities, collection_type="ITEM_ID")
                except Exception as e:
                    print(f"Error inserting batch {i // batch_size + 1}: {e}")
                    break
            print("物品embedding更新完毕......")
        else:
            print("data_type error")

    def update_embeddings_for_group(self) -> Dict[str, str]:
        """
        Entry point: refresh the user and item embeddings of this group and
        write them into Milvus.

        Fix: the original was annotated -> None yet returned a dict on success
        and None on setup failures; every path now returns a status dict.

        :returns: {"status": "done", "group": ...} after a completed run, or
                  {"status": "failed", "group": ...} when DB/config setup fails.
        """
        try:
            conn = pymysql.connect(
                host=self.db_config['host'],
                user=self.db_config['user'],
                passwd=self.db_config['passwd'],
                port=self.db_config['port'],
                db=self.db_config['db'],
                charset='utf8'
            )
        except Exception as e:
            my_logger.error(f"数据库连接失败: {e}")
            return {"status": "failed", "group": self.group_name}

        # closing() guarantees the connection is released even if an
        # unexpected error escapes (the original could leak it).
        with closing(conn):
            data_config_path = os.path.join("../configs", f"{self.group_name}.json")
            try:
                with open(data_config_path, 'r') as file:
                    data_config = json.load(file)
            except Exception as e:
                my_logger.error(f"加载配置文件失败: {e}")
                return {"status": "failed", "group": self.group_name}

            os.makedirs(data_config['embedding_saved_dir'], exist_ok=True)

            # Refresh user embeddings; a failure here is logged but does not
            # stop the item refresh below.
            try:
                user_df, all_feats_name, new_hist = self.get_user_df(conn, data_config)
                RecEmbeddingUpdater.model_predict(data_config, user_df, all_feats_name, new_hist,
                                                  data_type='user_data', collection_name=self.group_name)
            except Exception as e:
                my_logger.error(f"更新用户embedding失败: {e}")

            # Refresh item embeddings.
            try:
                item_df, all_feats_name, new_tag, item_id_list = self.get_item_df(conn, data_config)
                RecEmbeddingUpdater.model_predict(data_config, item_df, all_feats_name, new_tag,
                                                  data_type='item_data', collection_name=self.group_name, item_id_list=item_id_list)
            except Exception as e:
                my_logger.error(f"更新物品embedding失败: {e}")

        return {"status": "done", "group": self.group_name}


if __name__ == "__main__":
    # 1. Load the shared MySQL connection configuration.
    mysql_config_path = "../global_configs/mysql_db_connect_config.json"
    with open(mysql_config_path, 'r') as fh:
        db_config = json.load(fh)

    # print(db_config)
    # Run the embedding refresh for the "GZU" group.
    updater = RecEmbeddingUpdater(db_config=db_config, group_name="GZU")
    updater.update_embeddings_for_group()






# exit()

# NOTE: Everything below is a legacy, commented-out copy of RecEmbeddingUpdater
# that has been superseded by the class defined above. It is kept for reference
# only; consider removing it (the history lives in version control).
# class RecEmbeddingUpdater(object):
#     """
#     用于更新用户/物品的Embedding并写入Milvus，可多进程并发处理。
#     """
#     def __init__(self, db_config, group_name, milvus_host="127.0.0.1", milvus_port="19530"):
#         """
#         db_config: dict，包含数据库连接参数（host, user, passwd, port, db, charset）
#         group_name: str，对应分组名称，用于选择数据配置文件以及数据表名称
#         milvus_host, milvus_port: Milvus服务的地址和端口
#         """
#         self.db_config = db_config
#         self.group_name = group_name
#         self.milvus_host = milvus_host
#         self.milvus_port = milvus_port

#     @staticmethod
#     def encode_df(data_config, df_to_encode, data_type='user_data'):
#         """
#         对 DataFrame 进行特征编码：
#          - 对稀疏特征使用 LabelEncoder 编码
#          - 对密集特征使用 MinMaxScaler 归一化
#         """
#         sparse_features = [] 
#         dense_features = [] 
#         for feature in data_config['data_config'][data_type]:
#             if feature["feature_type"] == "IdFeature":
#                 sparse_features.append(feature["input_name"])
#             elif feature["feature_type"] == "DenseFeature":
#                 dense_features.append(feature["input_name"])
        
#         my_logger.info("start to encoding...")
#         # 加载预先保存好的转换器
#         with open(os.path.join(data_config['feature_transformer_dir'], 'sparse_labelencoder.pkl'), 'rb') as f:  
#             sparse_labelencoder = pickle.load(f)
#         with open(os.path.join(data_config['feature_transformer_dir'], 'dense_minmaxscaler.pkl'), 'rb') as f:  
#             dense_minmaxscaler = pickle.load(f)
        
#         with warnings.catch_warnings():
#             warnings.simplefilter("ignore")
#             # 对稀疏特征进行 LabelEncoding
#             for feat in sparse_features:
#                 lbe = sparse_labelencoder[feat]
#                 is_known = df_to_encode[feat].isin(lbe.classes_)  
#                 df_to_encode.loc[~is_known, feat] = -2
#                 df_to_encode[feat] = lbe.transform(df_to_encode[feat])
#             # 对密集特征归一化
#             if data_type == 'user_data':
#                 mms = dense_minmaxscaler["user_dense_features"]
#             elif data_type == 'item_data':
#                 mms = dense_minmaxscaler["item_dense_features"]
#             df_to_encode[dense_features] = mms.transform(df_to_encode[dense_features])
#         my_logger.info("encoding success!!!")
#         if data_type == 'user_data':
#             return df_to_encode, sparse_features + dense_features + ["user_hist"]
#         elif data_type == 'item_data':
#             return df_to_encode, sparse_features + dense_features + ["tag"]
#         else:
#             my_logger.error("data_type error!!!")
#             return None

#     @staticmethod
#     def get_var_feature(data, col):
#         """
#         对变长特征进行处理：分割、建立 key2index 映射，并进行 padding
#         """
#         key2index = {}
#         def split(x):
#             key_ans = x.split('|')
#             for key in key_ans:
#                 if key not in key2index:
#                     key2index[key] = len(key2index) + 1
#             return list(map(lambda x: key2index[x], key_ans))
#         var_feature = list(map(split, data[col].values))
#         var_feature_length = np.array(list(map(len, var_feature)))
#         max_len = max(var_feature_length)
#         var_feature = pad_sequences(var_feature, maxlen=max_len, padding='post')
#         return key2index, var_feature, max_len

#     @staticmethod
#     def get_test_var_feature(data, col, initial_key2index, max_len):
#         """
#         对测试数据中的变长特征进行处理（根据已有 key2index），不足长度则进行 padding
#         """
#         key2index = initial_key2index
#         def split(x):
#             key_ans = x.split('|')
#             for key in key_ans:
#                 if key not in key2index:
#                     key2index[key] = len(key2index) + 1
#             return list(map(lambda x: key2index[x], key_ans))
#         test_hist = list(map(split, data[col].values))
#         test_hist = pad_sequences(test_hist, maxlen=max_len, padding='post')
#         return test_hist, key2index

#     def get_user_df(self, conn, data_config, user_maxlen=50):
#         """
#         从数据库获取用户数据，处理 user_hist 序列、更新 key2index 并编码数据。
#         """
#         keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
#         keys_to_fetch.append('user_hist')
#         sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {self.group_name}_all_user_data"
#         cursor = conn.cursor()
#         cursor.execute(sql_query)
#         results = cursor.fetchall()
#         user_df = pd.DataFrame(results, columns=keys_to_fetch)

#         # 检查并添加 embedding_index 列
#         cursor.execute(f"SHOW COLUMNS FROM {self.group_name}_all_user_data LIKE 'embedding_index';")
#         column_exists = cursor.fetchone()
#         if not column_exists:
#             sql = f"ALTER TABLE {self.group_name}_all_user_data ADD COLUMN embedding_index INT AFTER UPDATE_DATE"
#             cursor.execute(sql)
#         user_id_list = user_df['USER_ID'].tolist()
#         user_embedding_index = [i for i in range(len(user_id_list))]
#         for user_id, embedding_index in zip(user_id_list, user_embedding_index):
#             sql = f"UPDATE {self.group_name}_all_user_data SET embedding_index = %s WHERE USER_ID = %s"
#             cursor.execute(sql, (embedding_index, user_id))
#         conn.commit()
#         print("user_id对应的索引数据更新成功")

#         # 提取用户行为序列（截取最后50条记录）
#         user_df['user_hist'] = user_df['user_hist'].str.strip('|').str.split('|').str[-50:].str.join('|')
        
#         # 更新用户的 key2index
#         user_key2index_dir = data_config['user_key2index']
#         file_path = os.path.join(user_key2index_dir, 'user_key2index.json')
#         if not os.path.exists(user_key2index_dir):   
#             os.makedirs(user_key2index_dir)
#         file_exist_flag = os.path.exists(file_path)
#         if not file_exist_flag:
#             user_key2index = {}
#             with open(file_path, 'w') as f:
#                 pass
#         else:
#             with open(file_path, 'r') as f:
#                 user_key2index = json.load(f)
#         new_user_hist, user_key2index = self.get_test_var_feature(user_df, 'user_hist', user_key2index, user_maxlen)
#         with open(file_path, 'w') as f:
#             json.dump(user_key2index, f)

#         # 对用户数据编码
#         user_df, all_features = self.encode_df(data_config, user_df, 'user_data')
#         return user_df, all_features, new_user_hist

#     def get_item_df(self, conn, data_config, item_maxlen=3):
#         """
#         从数据库获取物品数据，处理 tag 序列、更新 key2index 并编码数据，
#         同时更新 tag2recordids 表。
#         """
#         keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
#         sql_query = f"SELECT * FROM {self.group_name}_all_item_data"
#         all_item_df = pd.read_sql_query(sql_query, conn)
#         item_id_list = all_item_df['ITEM_ID'].tolist()
#         item_df = all_item_df[keys_to_fetch]

#         # 构建 tag 到 record_id 的映射，并存入 CSV 文件和数据库
#         temp_df = all_item_df[['RECORD_ID', 'tag']].drop_duplicates()
#         temp_df['RECORD_ID'] = temp_df['RECORD_ID'].astype(int)
#         temp_df['tag'] = temp_df['tag'].astype(str)
#         temp_df['tag'] = temp_df['tag'].apply(lambda x: x.split('|'))
#         tag_to_record_ids = {}
#         for _, row in temp_df.iterrows():
#             record_id = row['RECORD_ID']
#             tags = row['tag']
#             for tag in tags:
#                 tag_to_record_ids.setdefault(tag, []).append(record_id)
#         tag_df = pd.DataFrame(list(tag_to_record_ids.items()), columns=['tag', 'RECORD_ID'])
#         tag_df['RECORD_ID'] = tag_df['RECORD_ID'].apply(lambda x: '|'.join(map(str, x)))
#         tag_df.to_csv(data_config['tag2recordids_path'], index=False)

#         with conn.cursor() as cursor:
#             cursor.execute(f"DROP TABLE IF EXISTS {self.group_name}_tag2recordId")
#             cursor.execute(f"""
#                 CREATE TABLE {self.group_name}_tag2recordId (
#                     tag VARCHAR(255),
#                     idList TEXT
#                 )
#             """)
#             conn.commit()
#             insert_query = f"INSERT INTO {self.group_name}_tag2recordId (tag, idList) VALUES (%s, %s)"
#             cursor.executemany(insert_query, tag_df.itertuples(index=False))
#             print(f"Table {self.group_name}_tag2recordId created and data inserted")
#             conn.commit()

#         # 更新 tag 的 key2index
#         tag_key2index_dir = data_config['tag_key2index']
#         file_path = os.path.join(tag_key2index_dir, 'tag_key2index.json')
#         if not os.path.exists(tag_key2index_dir):   
#             os.makedirs(tag_key2index_dir)
#         file_exist_flag = os.path.exists(file_path)
#         if not file_exist_flag:
#             tag_key2index = {}
#             with open(file_path, 'w') as f:
#                 pass
#         else:
#             with open(file_path, 'r') as f:
#                 tag_key2index = json.load(f)
#         new_tag, tag_key2index = self.get_test_var_feature(item_df, 'tag', tag_key2index, item_maxlen)
#         with open(file_path, 'w') as f:
#             json.dump(tag_key2index, f)
#         # 对物品数据编码
#         item_df, all_features = self.encode_df(data_config, item_df, 'item_data')
#         return item_df, all_features, new_tag, item_id_list

#     @staticmethod
#     def load_model_save_path(data_config):
#         """
#         加载模型
#         """
#         print("加载模型......")
#         model_save_path = data_config['model_dir']
#         print("model_save_path", model_save_path)
#         model = load_model(model_save_path)
#         print("加载完毕......")
#         return model

#     @staticmethod
#     def create_collection(collection_name, collection_type="USER_ID"):
#         """
#         创建 Milvus collection
#         """
#         from pymilvus import Collection, FieldSchema, CollectionSchema, DataType
#         item_id_field = FieldSchema(name=collection_type, dtype=DataType.INT64, is_primary=True, auto_id=False)
#         embedding_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=128)
#         fields = [item_id_field, embedding_field]
#         schema = CollectionSchema(fields=fields, description=f"Collection for item embeddings")
#         collection = Collection(name=collection_name, schema=schema)
#         print(f"Create collection {collection_name} successfully")
#         return collection

#     @staticmethod
#     def write_to_milvus(collection_name, entities, collection_type="USER_ID"):
#         """
#         写入数据到 Milvus，若 collection 不存在则创建
#         """
#         from pymilvus import connections, Collection, utility, FieldSchema, CollectionSchema, DataType
#         connections.connect("default", host="10.240.1.3", port="19530")
#         if not utility.has_collection(collection_name):
#             RecEmbeddingUpdater.create_collection(collection_name, collection_type)
#         collection = Collection(name=collection_name)
#         collection.insert(entities)
#         collection.flush()
#         index_params = {
#             "index_type": "IVF_FLAT",
#             "metric_type": "L2",
#             "params": {"nlist": 128},
#         }
#         collection.create_index(field_name="embedding", index_params=index_params)

#     @staticmethod
#     def update_user_embedding(model, data_config, user_df, all_features, new_user_hist):
#         """
#         通过模型计算用户 embedding
#         """
#         user_model_input = []
#         for input_tensor in model.input:
#             if input_tensor.name in all_features:
#                 user_model_input.append(input_tensor)
#         user_tower = Model(inputs=user_model_input, outputs=model.get_layer("user_embedding").output)
#         user_tower_data_input = {name: user_df[name] for name in all_features}
#         user_tower_data_input["user_hist"] = new_user_hist
#         user_embedding = user_tower.predict(user_tower_data_input)
#         print("user_embedding shape: ", user_embedding.shape)
#         return user_embedding

#     @staticmethod
#     def update_item_embedding(model, data_config, item_df, all_features, new_tag):
#         """
#         通过模型计算物品 embedding
#         """
#         item_model_input = []
#         for input_tensor in model.input:
#             if input_tensor.name in all_features:
#                 item_model_input.append(input_tensor)
#         item_tower = Model(inputs=item_model_input, outputs=model.get_layer("item_embedding").output)
#         item_tower_data_input = {name: item_df[name] for name in all_features}
#         item_tower_data_input["tag"] = new_tag
#         item_embedding = item_tower.predict(item_tower_data_input)
#         print("item_embedding shape: ", item_embedding.shape)
#         return item_embedding

#     @staticmethod
#     def model_predict(data_config, df, all_features, new_hist, data_type='user_data', collection_name='test', item_id_list=None):
#         """
#         根据 data_type 加载模型计算 embedding，并分批写入 Milvus。
#         """
#         print("加载模型......")
#         model_save_path = data_config['model_dir']
#         print("model_save_path", model_save_path)
#         model = load_model(model_save_path)
#         print("加载完毕......")

#         if data_type == 'user_data':
#             user_embedding = RecEmbeddingUpdater.update_user_embedding(model, data_config, df, all_features, new_hist)
#             user_ids = df['USER_ID'].tolist()
#             # 此处 user_ids 逻辑上用于映射 embedding index
#             user_ids = [i for i in range(len(user_ids))]
#             batch_size = 2000
#             collection_name = collection_name + "_user"
#             from tqdm import tqdm
#             with tqdm(total=len(user_ids), desc="用户数据写入Milvus", unit="batch") as pbar:
#                 for i in range(0, len(user_ids), batch_size):
#                     batch_entities = [user_ids[i:i + batch_size], user_embedding[i:i + batch_size]]
#                     try:
#                         RecEmbeddingUpdater.write_to_milvus(collection_name, batch_entities, collection_type="USER_ID")
#                         pbar.update(batch_size)
#                     except Exception as e:
#                         print(f"Error inserting batch {i // batch_size + 1}: {e}")
#                         break
#             print("用户embedding更新完毕......")
#         elif data_type == 'item_data':
#             item_ids = item_id_list
#             print("item ids", item_ids[:20])
#             item_embedding = RecEmbeddingUpdater.update_item_embedding(model, data_config, df, all_features, new_hist)
#             batch_size = 20000
#             collection_name = collection_name + "_item"
#             from tqdm import tqdm
#             with tqdm(total=len(item_ids), desc="物品数据写入Milvus", unit="batch") as pbar:
#                 for i in range(0, len(item_ids), batch_size):
#                     batch_entities = [item_ids[i:i + batch_size], item_embedding[i:i + batch_size]]
#                     try:
#                         RecEmbeddingUpdater.write_to_milvus(collection_name, batch_entities, collection_type="ITEM_ID")
#                         pbar.update(batch_size)
#                     except Exception as e:
#                         print(f"Error inserting batch {i // batch_size + 1}: {e}")
#                         break
#             print("物品embedding更新完毕......")
#         else:
#             print("data_type error")

#     def update_embeddings_for_group(self):
#         """
#         主入口：根据当前 group_name 依次更新用户和物品的 embedding，
#         并调用 Milvus 写入。
#         """
#         # 连接数据库
#         conn = pymysql.connect(
#             host=self.db_config['host'],
#             user=self.db_config['user'],
#             passwd=self.db_config['passwd'],
#             port=self.db_config['port'],
#             db=self.db_config['db'],
#             charset='utf8'
#         )

#         # 加载 group 对应的配置文件（例如：../configs/{group_name}.json）
#         data_config_path = os.path.join("../configs", f"{self.group_name}.json")
#         with open(data_config_path, 'r') as file:
#             data_config = json.load(file)
#         # 检查 embedding 保存目录
#         if not os.path.exists(data_config['embedding_saved_dir']):
#             os.makedirs(data_config['embedding_saved_dir'])

#         # 更新用户数据
#         user_df, all_feats_name, new_hist = self.get_user_df(conn, data_config)
#         RecEmbeddingUpdater.model_predict(data_config, user_df, all_feats_name, new_hist,
#                                             data_type='user_data', collection_name=self.group_name)

#         # 更新物品数据
#         item_df, all_feats_name, new_tag, item_id_list = self.get_item_df(conn, data_config)
#         RecEmbeddingUpdater.model_predict(data_config, item_df, all_feats_name, new_tag,
#                                             data_type='item_data', collection_name=self.group_name, item_id_list=item_id_list)

#         conn.close()


# # if __name__ == "__main__":
# #     # 示例：配置数据库参数
# #     # SECURITY NOTE(review): a real database password was committed in this
# #     # commented-out example; it has been redacted here. The credential should
# #     # be rotated, and loaded from a config file (as the JSON-config example
# #     # further down in this file does) rather than hard-coded.
# #     db_config = {
# #         'host': '10.240.0.8',
# #         'user': 'lsp_rec_book',
# #         'passwd': '<REDACTED>',
# #         'port': 38141,
# #         'db': 'lsp_rec_book_sql'
# #     }
# #     # Fill in group_name according to the actual deployment, e.g. "JXSTNU"
# #     # NOTE(review): RecEmbeddingUpdater.__init__ as declared above takes only
# #     # (db_config, group_name); the milvus_host/milvus_port keyword arguments
# #     # here would raise TypeError if this example were uncommented — confirm
# #     # the intended constructor signature before re-enabling.
# #     updater = RecEmbeddingUpdater(db_config=db_config, group_name="JXSTNU", milvus_host="10.240.1.3", milvus_port="19530")
# #     updater.update_embeddings_for_group()


# if __name__ == "__main__":
#     """
#     主入口示例
#     """
#     # 1. 数据库配置
#     with open("../global_configs/mysql_db_connect_config.json", 'r') as file:
#         db_config = json.load(file)
#     # print(db_config)
#     # 2. Instantiate the updater
#     # NOTE(review): RecEmbeddingUpdater.__init__ as declared above accepts only
#     # (db_config, group_name); the milvus_host/milvus_port keyword arguments
#     # would raise TypeError if this block were re-enabled — confirm before use.
#     updater = RecEmbeddingUpdater(db_config=db_config, group_name="SUDA", milvus_host="10.240.1.3", milvus_port="19530")

#     # 3. 指定groupName，调用更新
    
#     updater.update_embeddings_for_group()
 