import os
import sys
import json
import pickle
import datetime
import warnings
from typing import Tuple, List, Dict, Any

import numpy as np
import pandas as pd
import pymysql
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, load_model

# Add the project root to the import path so the local `logs` module resolves.
sys.path.append("../")
from logs.log import setup_custom_logger
import warnings  # NOTE(review): duplicate of the `import warnings` at the top of the file; harmless but removable
# Initialize a daily log file named after the current date (one file per day).
LOG_DIR = '../Logfiles'
file_name = datetime.datetime.now().strftime('%Y-%m-%d')
my_logger = setup_custom_logger(os.path.join(LOG_DIR, f'{file_name}.log'), log_level="INFO")

# Pin the process to GPU 0 and disable pandas display truncation (debug-friendly output).
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)


class RecEmbeddingUpdater(object):
    """Compute user/item embeddings with a two-tower Keras model and write them to Milvus.

    One instance handles a single data group (``group_name``); multiple groups can be
    processed concurrently in separate processes.
    """
    def __init__(self, db_config: Dict[str, Any], group_name: str, milvus_host: str = "127.0.0.1", milvus_port: str = "19530"):
        """
        :param db_config: MySQL connection parameters (host, user, passwd, port, db, charset)
        :param group_name: data-group name; used as the config-file name and table-name prefix
        :param milvus_host: Milvus service address
        :param milvus_port: Milvus service port
        """
        self.db_config = db_config
        self.group_name = group_name
        # NOTE(review): milvus_host/milvus_port are stored but write_to_milvus()
        # connects to a hard-coded address instead — confirm which is intended.
        self.milvus_host = milvus_host
        self.milvus_port = milvus_port

    @staticmethod
    def encode_df(data_config: Dict[str, Any], df: pd.DataFrame, data_type: str = 'user_data') -> Tuple[pd.DataFrame, List[str]]:
        """Encode a feature DataFrame using the transformers pickled at training time.

        - Sparse (IdFeature) columns are encoded with the saved LabelEncoders.
        - Dense (DenseFeature) columns are scaled with the saved MinMaxScaler.

        :param data_config: group config; must provide ``data_config[data_type]`` feature
            specs plus ``feature_transformer_dir`` containing the pickled transformers.
        :param df: raw feature rows fetched from MySQL (mutated in place).
        :param data_type: 'user_data' or 'item_data'.
        :return: the encoded DataFrame and the ordered feature-name list
            (sparse + dense + the variable-length column); ``[]`` on a bad data_type.
        """
        sparse_features = []
        dense_features = []
        for feature in data_config['data_config'][data_type]:
            if feature["feature_type"] == "IdFeature":
                sparse_features.append(feature["input_name"])
            elif feature["feature_type"] == "DenseFeature":
                dense_features.append(feature["input_name"])
        my_logger.info("start to encoding...")

        # Load the transformers fitted at training time so online encoding matches training.
        sparse_path = os.path.join(data_config['feature_transformer_dir'], 'sparse_labelencoder.pkl')
        dense_path = os.path.join(data_config['feature_transformer_dir'], 'dense_minmaxscaler.pkl')
        with open(sparse_path, 'rb') as f:
            sparse_labelencoder = pickle.load(f)
        with open(dense_path, 'rb') as f:
            dense_minmaxscaler = pickle.load(f)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Encode sparse features; values unseen at fit time are first mapped to the
            # sentinel -2 (assumes -2 was among the encoder's classes when it was
            # fitted, otherwise transform() would raise — TODO confirm against training).
            for feat in sparse_features:
                lbe: LabelEncoder = sparse_labelencoder[feat]
                is_known = df[feat].isin(lbe.classes_)
                df.loc[~is_known, feat] = -2
                df[feat] = lbe.transform(df[feat])
            # Scale dense features with the scaler fitted for this data type.
            if data_type == 'user_data':
                mms = dense_minmaxscaler["user_dense_features"]
            elif data_type == 'item_data':
                mms = dense_minmaxscaler["item_dense_features"]
            df[dense_features] = mms.transform(df[dense_features])
        my_logger.info("encoding success!!!")
        # The variable-length column ('user_hist' / 'tag') is appended to the feature
        # list but is padded/indexed separately by get_test_var_feature().
        if data_type == 'user_data':
            return df, sparse_features + dense_features + ["user_hist"]
        elif data_type == 'item_data':
            return df, sparse_features + dense_features + ["tag"]
        else:
            my_logger.error("data_type error!!!")
            return df, []

    @staticmethod
    def get_var_feature(data: pd.DataFrame, col: str) -> Tuple[Dict[str, int], np.ndarray, int]:
        """Encode a '|'-separated variable-length column from scratch.

        Builds a fresh key->index vocabulary (indices start at 1; 0 is the padding
        value) and post-pads every row to the longest sequence in ``data``.

        :return: (key2index mapping, padded int matrix, max sequence length)
        """
        key2index = {}

        def split(x: str) -> List[int]:
            # Assign the next free index to any key seen for the first time.
            keys = x.split('|')
            for key in keys:
                if key not in key2index:
                    key2index[key] = len(key2index) + 1
            return [key2index[k] for k in keys]

        var_feature = list(map(split, data[col].values))
        max_len = max(len(v) for v in var_feature)
        var_feature_padded = pad_sequences(var_feature, maxlen=max_len, padding='post')
        return key2index, var_feature_padded, max_len

    @staticmethod
    def get_test_var_feature(data: pd.DataFrame, col: str, key2index: Dict[str, int], max_len: int) -> Tuple[np.ndarray, Dict[str, int]]:
        """Encode a '|'-separated variable-length column against an existing vocabulary.

        New keys extend ``key2index`` in place; rows are post-padded (or truncated by
        pad_sequences) to ``max_len``.

        :return: (padded int matrix, the possibly-grown key2index mapping)
        """
        def split(x: str) -> List[int]:
            keys = x.split('|')
            for key in keys:
                if key not in key2index:
                    key2index[key] = len(key2index) + 1
            return [key2index[k] for k in keys]

        test_feature = list(map(split, data[col].values))
        test_feature_padded = pad_sequences(test_feature, maxlen=max_len, padding='post')
        return test_feature_padded, key2index

    def get_user_df(self, conn: pymysql.connections.Connection, data_config: Dict[str, Any], user_maxlen: int = 50) -> Tuple[pd.DataFrame, List[str], np.ndarray]:
        """Fetch user rows from MySQL, persist embedding indices, and encode features.

        Side effects: adds an ``embedding_index`` column to ``{group}_all_user_data``
        if missing, rewrites it with each user's positional row index, and updates the
        persistent user_hist key2index JSON on disk.

        :param user_maxlen: keep only the most recent N history items per user.
        :return: (encoded user DataFrame, feature-name list, padded user_hist matrix)
        """
        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
        keys_to_fetch.append('user_hist')
        # NOTE(review): table/column names are interpolated into SQL; group_name and the
        # config must come from trusted sources, not user input.
        sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {self.group_name}_all_user_data"
        cursor = conn.cursor()
        cursor.execute(sql_query)
        results = cursor.fetchall()
        user_df = pd.DataFrame(results, columns=keys_to_fetch)

        # Make sure the embedding_index column exists before writing to it.
        cursor.execute(f"SHOW COLUMNS FROM {self.group_name}_all_user_data LIKE 'embedding_index';")
        if not cursor.fetchone():
            cursor.execute(f"ALTER TABLE {self.group_name}_all_user_data ADD COLUMN embedding_index INT AFTER UPDATE_DATE")
        user_ids = user_df['USER_ID'].tolist()
        # Persist each user's positional index; this index doubles as the Milvus primary
        # key for the user's embedding (see model_predict).  One UPDATE per user —
        # O(n) round trips, acceptable for moderate table sizes.
        for idx, user_id in enumerate(user_ids):
            sql = f"UPDATE {self.group_name}_all_user_data SET embedding_index = %s WHERE USER_ID = %s"
            cursor.execute(sql, (idx, user_id))
        conn.commit()
        print("user_id对应的索引数据更新成功")

        # Keep only the last user_maxlen behaviour records of user_hist.
        user_df['user_hist'] = user_df['user_hist'].str.strip('|').str.split('|').str[-user_maxlen:].str.join('|')

        # Load (or initialise) the persistent user_hist vocabulary.
        user_key2index_dir = data_config['user_key2index']
        os.makedirs(user_key2index_dir, exist_ok=True)
        key2index_path = os.path.join(user_key2index_dir, 'user_key2index.json')
        if os.path.exists(key2index_path):
            with open(key2index_path, 'r') as f:
                user_key2index = json.load(f)
        else:
            user_key2index = {}
            # Touch the file; it is rewritten with real content a few lines below.
            with open(key2index_path, 'w') as f:
                pass
        new_user_hist, user_key2index = self.get_test_var_feature(user_df, 'user_hist', user_key2index, user_maxlen)
        with open(key2index_path, 'w') as f:
            json.dump(user_key2index, f)

        # Encode sparse/dense user features.
        user_df, all_features = self.encode_df(data_config, user_df, 'user_data')
        return user_df, all_features, new_user_hist

    def get_item_df(self, conn: pymysql.connections.Connection, data_config: Dict[str, Any], item_maxlen: int = 3) -> Tuple[pd.DataFrame, List[str], np.ndarray, List[Any]]:
        """Fetch item rows from MySQL, rebuild the tag->record-id table, and encode features.

        Side effects: writes the tag2recordids CSV, drops and recreates the
        ``{group}_tag2recordId`` MySQL table, and updates the persistent tag
        key2index JSON on disk.

        :param item_maxlen: pad/truncate each item's tag sequence to this length.
        :return: (encoded item DataFrame, feature names, padded tag matrix, ITEM_ID list)
        """
        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
        sql_query = f"SELECT * FROM {self.group_name}_all_item_data"
        # NOTE(review): pandas warns when read_sql_query is handed a raw pymysql
        # connection (non-SQLAlchemy); it works, but the warning is expected.
        all_item_df = pd.read_sql_query(sql_query, conn)
        item_id_list = all_item_df['ITEM_ID'].tolist()
        item_df = all_item_df[keys_to_fetch]

        # Build tag -> record_id lists and persist them to CSV and MySQL.
        temp_df = all_item_df[['RECORD_ID', 'tag']].drop_duplicates()
        temp_df['RECORD_ID'] = temp_df['RECORD_ID'].astype(int)
        temp_df['tag'] = temp_df['tag'].astype(str).apply(lambda x: x.split('|'))
        tag_to_record_ids = {}
        for _, row in temp_df.iterrows():
            for tag in row['tag']:
                tag_to_record_ids.setdefault(tag, []).append(row['RECORD_ID'])
        tag_df = pd.DataFrame(list(tag_to_record_ids.items()), columns=['tag', 'RECORD_ID'])
        tag_df['RECORD_ID'] = tag_df['RECORD_ID'].apply(lambda x: '|'.join(map(str, x)))
        tag_df.to_csv(data_config['tag2recordids_path'], index=False)

        with conn.cursor() as cursor:
            # Full rebuild each run: drop and recreate the mapping table.
            cursor.execute(f"DROP TABLE IF EXISTS {self.group_name}_tag2recordId")
            cursor.execute(f"""
                CREATE TABLE {self.group_name}_tag2recordId (
                    tag VARCHAR(255),
                    idList TEXT
                )
            """)
            conn.commit()
            insert_query = f"INSERT INTO {self.group_name}_tag2recordId (tag, idList) VALUES (%s, %s)"
            # itertuples yields (tag, idList) pairs in column order, matching the placeholders.
            cursor.executemany(insert_query, tag_df.itertuples(index=False))
            print(f"Table {self.group_name}_tag2recordId created and data inserted")
            conn.commit()

        # Load (or initialise) the persistent tag vocabulary.
        tag_key2index_dir = data_config['tag_key2index']
        os.makedirs(tag_key2index_dir, exist_ok=True)
        tag_key2index_path = os.path.join(tag_key2index_dir, 'tag_key2index.json')
        if os.path.exists(tag_key2index_path):
            with open(tag_key2index_path, 'r') as f:
                tag_key2index = json.load(f)
        else:
            tag_key2index = {}
            # Touch the file; it is rewritten with real content a few lines below.
            with open(tag_key2index_path, 'w') as f:
                pass
        new_tag, tag_key2index = self.get_test_var_feature(item_df, 'tag', tag_key2index, item_maxlen)
        with open(tag_key2index_path, 'w') as f:
            json.dump(tag_key2index, f)

        # Encode sparse/dense item features.
        item_df, all_features = self.encode_df(data_config, item_df, 'item_data')
        return item_df, all_features, new_tag, item_id_list

    @staticmethod
    def load_model_save_path(data_config: Dict[str, Any]) -> Model:
        """Load the trained Keras model from ``data_config['model_dir']``."""
        print("加载模型......")
        model = load_model(data_config['model_dir'])
        print("加载完毕......")
        return model

    @staticmethod
    def create_collection(collection_name: str, collection_type: str = "USER_ID"):
        """Create a Milvus collection with an INT64 primary key and a 128-dim float vector.

        :param collection_name: name of the collection to create.
        :param collection_type: primary-key field name ("USER_ID" or "ITEM_ID").
        :return: the newly created pymilvus Collection.

        NOTE(review): dim=128 is hard-coded and must match the model's embedding
        output; the schema description always reads "item embeddings" even for
        user collections (cosmetic only).
        """
        from pymilvus import Collection, FieldSchema, CollectionSchema, DataType
        item_id_field = FieldSchema(name=collection_type, dtype=DataType.INT64, is_primary=True, auto_id=False)
        embedding_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=128)
        schema = CollectionSchema(fields=[item_id_field, embedding_field], description=f"Collection for item embeddings")
        collection = Collection(name=collection_name, schema=schema)
        print(f"Create collection {collection_name} successfully")
        return collection

    @staticmethod
    def write_to_milvus(collection_name: str, entities: List[Any], collection_type: str = "USER_ID"):
        """Insert one batch of entities into Milvus, creating the collection on demand.

        ``entities`` is column-ordered: [primary-key list, embedding matrix].

        NOTE(review): the Milvus address is hard-coded here and ignores the
        milvus_host/milvus_port passed to __init__ — confirm which should win.
        Re-creating the IVF_FLAT index after every batch is redundant but harmless.
        """
        from pymilvus import connections, Collection, utility
        connections.connect("default", host="10.240.1.3", port="19530")
        if not utility.has_collection(collection_name):
            RecEmbeddingUpdater.create_collection(collection_name, collection_type)
        collection = Collection(name=collection_name)
        collection.insert(entities)
        collection.flush()
        index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "L2",
            "params": {"nlist": 128},
        }
        collection.create_index(field_name="embedding", index_params=index_params)

    @staticmethod
    def update_user_embedding(model, data_config: Dict[str, Any], user_df: pd.DataFrame,
                              all_features: List[str], new_user_hist: np.ndarray) -> np.ndarray:
        """Run the user tower of the model and return the user embedding matrix.

        Relies on the model's input tensors being named exactly after the feature
        columns so they can be selected via ``tensor.name in all_features`` —
        TODO confirm Keras does not suffix these names (e.g. ':0') for this model.
        """
        user_model_input = [tensor for tensor in model.input if tensor.name in all_features]
        user_tower = Model(inputs=user_model_input, outputs=model.get_layer("user_embedding").output)
        user_input = {name: user_df[name] for name in all_features}
        # The padded history matrix replaces the raw '|'-joined string column.
        user_input["user_hist"] = new_user_hist
        user_embedding = user_tower.predict(user_input)
        print("user_embedding shape: ", user_embedding.shape)
        return user_embedding

    @staticmethod
    def update_item_embedding(model, data_config: Dict[str, Any], item_df: pd.DataFrame,
                              all_features: List[str], new_tag: np.ndarray) -> np.ndarray:
        """Run the item tower of the model and return the item embedding matrix.

        Same tensor-name-matching assumption as update_user_embedding — TODO confirm.
        """
        item_model_input = [tensor for tensor in model.input if tensor.name in all_features]
        item_tower = Model(inputs=item_model_input, outputs=model.get_layer("item_embedding").output)
        item_input = {name: item_df[name] for name in all_features}
        # The padded tag matrix replaces the raw '|'-joined string column.
        item_input["tag"] = new_tag
        item_embedding = item_tower.predict(item_input)
        print("item_embedding shape: ", item_embedding.shape)
        return item_embedding

    @staticmethod
    def model_predict(data_config: Dict[str, Any], df: pd.DataFrame, all_features: List[str],
                      new_hist: np.ndarray, data_type: str = 'user_data', collection_name: str = 'test',
                      item_id_list: List[Any] = None) -> None:
        """Compute embeddings for one data type and push them to Milvus in batches.

        User embeddings are keyed by positional row index (matching the
        embedding_index column written in get_user_df); item embeddings are keyed
        by ITEM_ID.  A failed batch aborts the remaining batches for that data type.
        """
        print("加载模型......")
        model = load_model(data_config['model_dir'])
        print("加载完毕......")
        if data_type == 'user_data':
            user_embedding = RecEmbeddingUpdater.update_user_embedding(model, data_config, df, all_features, new_hist)
            # Positional indices, not raw USER_IDs — they line up with embedding_index.
            user_ids = list(range(len(df['USER_ID'].tolist())))
            batch_size = 2000
            collection_name += "_user"
            from tqdm import tqdm
            for i in tqdm(range(0, len(user_ids), batch_size), desc="用户数据写入Milvus", unit="batch"):
                batch_entities = [user_ids[i:i + batch_size], user_embedding[i:i + batch_size]]
                try:
                    RecEmbeddingUpdater.write_to_milvus(collection_name, batch_entities, collection_type="USER_ID")
                except Exception as e:
                    print(f"Error inserting batch {i // batch_size + 1}: {e}")
                    break
            print("用户embedding更新完毕......")
        elif data_type == 'item_data':
            print("item ids", item_id_list[:20])
            item_embedding = RecEmbeddingUpdater.update_item_embedding(model, data_config, df, all_features, new_hist)
            batch_size = 20000
            collection_name += "_item"
            from tqdm import tqdm
            for i in tqdm(range(0, len(item_id_list), batch_size), desc="物品数据写入Milvus", unit="batch"):
                batch_entities = [item_id_list[i:i + batch_size], item_embedding[i:i + batch_size]]
                try:
                    RecEmbeddingUpdater.write_to_milvus(collection_name, batch_entities, collection_type="ITEM_ID")
                except Exception as e:
                    print(f"Error inserting batch {i // batch_size + 1}: {e}")
                    break
            print("物品embedding更新完毕......")
        else:
            print("data_type error")

    def update_embeddings_for_group(self) -> None:
        """Main entry point: refresh user and item embeddings for this group.

        Connects to MySQL, loads ``../configs/{group_name}.json``, then runs the
        user and item pipelines independently — a failure in one is logged and
        does not stop the other.  The connection is closed before returning.
        """
        try:
            conn = pymysql.connect(
                host=self.db_config['host'],
                user=self.db_config['user'],
                passwd=self.db_config['passwd'],
                port=self.db_config['port'],
                db=self.db_config['db'],
                charset='utf8'
            )
        except Exception as e:
            my_logger.error(f"数据库连接失败: {e}")
            return

        data_config_path = os.path.join("../configs", f"{self.group_name}.json")
        try:
            with open(data_config_path, 'r') as file:
                data_config = json.load(file)
        except Exception as e:
            my_logger.error(f"加载配置文件失败: {e}")
            conn.close()
            return

        if not os.path.exists(data_config['embedding_saved_dir']):
            os.makedirs(data_config['embedding_saved_dir'], exist_ok=True)

        # Refresh user embeddings; errors are logged so item processing still runs.
        try:
            user_df, all_feats_name, new_hist = self.get_user_df(conn, data_config)
            RecEmbeddingUpdater.model_predict(data_config, user_df, all_feats_name, new_hist,
                                                data_type='user_data', collection_name=self.group_name)
        except Exception as e:
            my_logger.error(f"更新用户embedding失败: {e}")

        # Refresh item embeddings.
        try:
            item_df, all_feats_name, new_tag, item_id_list = self.get_item_df(conn, data_config)
            RecEmbeddingUpdater.model_predict(data_config, item_df, all_feats_name, new_tag,
                                                data_type='item_data', collection_name=self.group_name, item_id_list=item_id_list)
        except Exception as e:
            my_logger.error(f"更新物品embedding失败: {e}")

        conn.close()


if __name__ == "__main__":
    # Example database configuration.
    # SECURITY: credentials were previously hard-coded in source. They are now read
    # from environment variables, falling back to the old values so existing
    # deployments keep working unchanged. Prefer setting the REC_DB_* variables
    # and removing the fallbacks entirely.
    db_config = {
        'host': os.environ.get('REC_DB_HOST', '10.240.0.8'),
        'user': os.environ.get('REC_DB_USER', 'lsp_rec_book'),
        'passwd': os.environ.get('REC_DB_PASSWD', 'WaFQ_CRqZekqzxSKgr3l'),
        'port': int(os.environ.get('REC_DB_PORT', '38141')),
        'db': os.environ.get('REC_DB_NAME', 'lsp_rec_book_sql'),
    }
    updater = RecEmbeddingUpdater(db_config=db_config, group_name="SUDA", milvus_host="10.240.1.3", milvus_port="19530")
    updater.update_embeddings_for_group()
