import os
import sys
import json
import time
import pickle
import random
import datetime
import warnings
import numpy as np
import pandas as pd
import pymysql

from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# NOTE: make sure these two project-local modules are importable (see sys.path setup below).
from models.dssm import DSSM
from processing.feature_column import SparseFeat, DenseFeat, VarLenSparseFeat

import faiss

# Restrict TensorFlow to these GPUs; must be set before TF initializes CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = '2,3,7'  
import sys  # NOTE(review): `sys` is already imported at the top of the file; this re-import is redundant but harmless.
sys.path.append("../")
from logs.log import setup_custom_logger
# Logger initialization: one log file per calendar day, named by date.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
LOG_DIR = '/opt/wyh/LSP_book_rec/Logfiles'
file_name = str(datetime.datetime.now().strftime('%Y-%m-%d'))
my_logger = setup_custom_logger(os.path.join(LOG_DIR, '%s.log' % file_name), log_level="INFO")

class RecSysEngine:
    """
    DSSM-based recommendation pipeline wrapper.

    Pipeline (see run()): pull user/item data from MySQL, build train/val/test
    sets with negative sampling, encode sparse/dense/sequence features, train
    and evaluate a DSSM two-tower model, extract the tower embeddings, and
    build a faiss ANN index over item embeddings for retrieval.

    Args:
        config_path: path to the JSON configuration file.
        group_name: institution name; used to compose database table names
            (e.g. ``{group_name}_all_user_data``).
    """
    def __init__(self, config_path, group_name):
        self.config_path = config_path
        self.group_name = group_name
        self.data_config = self._load_config()
        self.conn = self._create_connection()
        # Attributes such as self.train / self.val / self.test are populated
        # by the later pipeline stages (data_process, preprocess_features, ...).

    def _load_config(self):
        # Load and return the JSON configuration as a dict.
        with open(self.config_path, 'r') as f:
            config = json.load(f)
        return config

    def _create_connection(self):
        # Open the MySQL connection; parameters can be adjusted as needed.
        # SECURITY NOTE(review): credentials are hard-coded here — consider
        # moving them into the config file or environment variables.
        conn = pymysql.connect(
            host='10.0.52.44',
            user='lsp_rec_book',
            passwd='WaFQ_CRqZekqzxSKgr3l',
            port=38141,
            db='lsp_rec_book_sql',
            charset='utf8'
        )
        return conn

    def get_train_val_test_data(self, user_maxlen=50):
        """
        Read user and item data from the database, construct train/validation/
        test sets (1 positive : 4 sampled negatives per history position), and
        save them as CSV files. Also stores the resulting DataFrames on self.

        Args:
            user_maxlen: keep only the most recent ``user_maxlen`` entries of
                each user's behaviour history.
        """
        data_config = self.data_config

        # Fetch user data (including the user behaviour sequence).
        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
        keys_to_fetch.append('user_hist')
        sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {self.group_name}_all_user_data"
        cursor = self.conn.cursor()
        cursor.execute(sql_query)
        results = cursor.fetchall()
        user_df = pd.DataFrame(results, columns=keys_to_fetch)

        # Truncate the '|'-separated user_hist to the most recent user_maxlen records.
        user_df['user_hist'] = user_df['user_hist'].str.strip('|').str.split('|').str[-int(user_maxlen):].str.join('|')

        # Split users into train (80%), val (10%) and test (10%).
        train_df, temp_df = train_test_split(user_df, test_size=0.2, random_state=42)
        val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

        # Fetch item data.
        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
        sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {self.group_name}_all_item_data"
        cursor = self.conn.cursor()
        cursor.execute(sql_query)
        results = cursor.fetchall()
        item_df = pd.DataFrame(results, columns=keys_to_fetch)
        unique_item_ids = item_df['ITEM_ID'].unique().tolist()
        len_item = len(unique_item_ids)

        # Directory where the processed CSVs are written.
        saved_path = data_config['saved_processed_data_path']
        if not os.path.exists(saved_path):
            os.makedirs(saved_path)

        datasets = [(train_df, 'train_data.csv'), (val_df, 'val_data.csv'), (test_df, 'test_data.csv')]
        processed_dfs = {}
        for df, filename in datasets:
            train_set = []
            for userID, hist in zip(df['USER_ID'], df['user_hist']):
                pos_list = [int(x) for x in hist.split('|') if x]
                # Negative sampler: draw random items until one outside the
                # user's positive list is found.
                # NOTE(review): this loops forever if a user's history covers
                # every item — acceptable only while len(pos_list) << len_item.
                def get_neg():
                    neg = pos_list[0]
                    while neg in pos_list:
                        neg = unique_item_ids[random.randint(0, len_item - 1)]
                    return neg

                neg_list_1 = [get_neg() for _ in range(len(pos_list))]
                neg_list_2 = [get_neg() for _ in range(len(pos_list))]
                neg_list_3 = [get_neg() for _ in range(len(pos_list))]
                neg_list_4 = [get_neg() for _ in range(len(pos_list))]

                # For each position i >= 1: the prefix (capped at 50) is the
                # history, pos_list[i] is the positive target (label 1), and
                # four sampled negatives get label 0.
                for i in range(1, len(pos_list)):
                    hist_seq = pos_list[:i][-50:]
                    hist_str = '|'.join(map(str, hist_seq))
                    train_set.append((userID, hist_str, pos_list[i], 1))
                    train_set.append((userID, hist_str, neg_list_1[i], 0))
                    train_set.append((userID, hist_str, neg_list_2[i], 0))
                    train_set.append((userID, hist_str, neg_list_3[i], 0))
                    train_set.append((userID, hist_str, neg_list_4[i], 0))
            random.shuffle(train_set)
            train_set_df = pd.DataFrame(train_set, columns=['USER_ID', 'user_hist', 'ITEM_ID', 'target'])
            # Join back the user-side and item-side side features.
            train_set_df = pd.merge(train_set_df, user_df.drop(columns=['user_hist']), on='USER_ID', how='left')
            train_set_df = pd.merge(train_set_df, item_df, on='ITEM_ID', how='left')
            file_path = os.path.join(saved_path, filename)
            train_set_df.to_csv(file_path, index=False)
            my_logger.info(f"Saved file: {file_path} with shape {train_set_df.shape}")
            processed_dfs[filename.split('_')[0]] = train_set_df

        self.train = processed_dfs['train']
        self.val = processed_dfs['val']
        self.test = processed_dfs['test']
        self.user_df = user_df
        self.item_df = item_df

    @staticmethod
    def get_var_feature(data, col, max_len=50):
        """
        Convert a '|'-separated sequence column into fixed-length integer
        sequences and build the token -> index mapping.

        Returns:
            (key2index, padded int array of shape (n_rows, max_len), max_len)
        """
        key2index = {}

        def split(x):
            keys = x.split('|')
            for key in keys:
                if key not in key2index:
                    # Index 0 is reserved for padding, so ids start at 1.
                    key2index[key] = len(key2index) + 1
            return [key2index[k] for k in keys]

        var_feature = list(map(split, data[col].values))
        var_feature = pad_sequences(var_feature, maxlen=max_len, padding='post')
        return key2index, var_feature, max_len

    @staticmethod
    def get_test_var_feature(data, col, key2index, max_len=50):
        """
        Encode a sequence column of val/test (or full) data with an existing
        key2index mapping, padding to max_len.

        Note: unseen tokens are appended to key2index in place (the mapping is
        mutated), so later callers see the grown vocabulary.
        """
        def split(x):
            keys = x.split('|')
            for key in keys:
                if key not in key2index:
                    key2index[key] = len(key2index) + 1
            return [key2index[k] for k in keys]
        test_hist = list(map(split, data[col].values))
        test_hist = pad_sequences(test_hist, maxlen=max_len, padding='post')
        return test_hist

    def data_process(self, user_maxlen=50):
        """
        Full data preprocessing:
         1. Call get_train_val_test_data to generate the CSV data.
         2. Reload the CSVs as train/val/test DataFrames.
         3. Build the user/item sparse and dense feature name lists from config.
         4. Re-read the full user and item tables and preprocess them.
        """
        self.get_train_val_test_data(user_maxlen=user_maxlen)
        data_config = self.data_config

        # Reload the just-saved CSV data.

        self.train = pd.read_csv(os.path.join(data_config['saved_processed_data_path'], 'train_data.csv'))
        self.val = pd.read_csv(os.path.join(data_config['saved_processed_data_path'], 'val_data.csv'))
        self.test = pd.read_csv(os.path.join(data_config['saved_processed_data_path'], 'test_data.csv'))


        # Debug variants (row-limited reads), kept for reference:
        # self.train = pd.read_csv(data_config['saved_processed_data_path'] + '/train_data.csv',nrows=20000)
        # self.val = pd.read_csv(data_config['saved_processed_data_path'] + '/val_data.csv',nrows=5000)
        # self.test = pd.read_csv(data_config['saved_processed_data_path'] + '/test_data.csv',nrows=5000)

        # Build the user/item feature name lists from the config.
        user_sparse_features = []
        user_dense_features = []
        item_sparse_features = []
        item_dense_features = []
        feats_name_lt = ([item['input_name'] for item in data_config["data_config"]["user_data"]] +
                         [item['input_name'] for item in data_config["data_config"]["item_data"]])

        for user_fea in data_config['data_config']['user_data']:
            if user_fea["input_name"] not in feats_name_lt:
                continue
            if user_fea["feature_type"] == "IdFeature":
                user_sparse_features.append(user_fea["input_name"])
            elif user_fea["feature_type"] == "DenseFeature":
                user_dense_features.append(user_fea["input_name"])
                for df_data in [self.train, self.val, self.test]:
                    df_data[user_fea["input_name"]] = df_data[user_fea["input_name"]].fillna(0).astype(float)

        for item_fea in data_config['data_config']['item_data']:
            if item_fea["input_name"] not in feats_name_lt:
                continue
            if item_fea["feature_type"] == "IdFeature":
                item_sparse_features.append(item_fea["input_name"])
            elif item_fea["feature_type"] == "DenseFeature":
                item_dense_features.append(item_fea["input_name"])
                for df_data in [self.train, self.val, self.test]:
                    df_data[item_fea["input_name"]] = df_data[item_fea["input_name"]].fillna(0).astype(float)

        self.sparse_features = user_sparse_features + item_sparse_features
        self.dense_features = user_dense_features + item_dense_features

        # -2 acts as the "missing/unknown" sentinel for sparse features.
        for df in [self.train, self.val, self.test]:
            df[self.sparse_features] = df[self.sparse_features].fillna(-2)
            df[self.dense_features] = df[self.dense_features].fillna(0)
            df['tag'] = df['tag'].astype(str)

        # Re-read the full user and item tables (needed later for embedding
        # extraction over the whole population).
        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
        keys_to_fetch.append('user_hist')
        sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {self.group_name}_all_user_data"
        cursor = self.conn.cursor()
        cursor.execute(sql_query)
        results = cursor.fetchall()
        all_user_feats = pd.DataFrame(results, columns=keys_to_fetch)
        all_user_feats[user_sparse_features] = all_user_feats[user_sparse_features].fillna(-2)
        all_user_feats[user_dense_features] = all_user_feats[user_dense_features].fillna(0).astype(float)
        all_user_feats['user_hist'] = all_user_feats['user_hist'].str.strip('|').str.split('|').str[-50:].str.join('|')

        keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
        sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {self.group_name}_all_item_data"
        cursor = self.conn.cursor()
        cursor.execute(sql_query)
        results = cursor.fetchall()
        all_item_feats = pd.DataFrame(results, columns=keys_to_fetch)
        all_item_feats[item_sparse_features] = all_item_feats[item_sparse_features].fillna(-2)
        all_item_feats[item_dense_features] = all_item_feats[item_dense_features].fillna(0).astype(float)
        all_item_feats['tag'] = all_item_feats['tag'].astype(str)

        self.all_user_feats = all_user_feats
        self.all_item_feats = all_item_feats

        # Resolve the raw user/item id column names from the loan_data config.
        user_id = data_config['data_config']['loan_data']['user_id']['input_name']
        item_id = data_config['data_config']['loan_data']['item_id']['input_name']
        all_user_feats['user_hist'] = all_user_feats['user_hist'].fillna("")
        # NOTE(review): dedup is disabled; the two assignments below simply
        # re-store the same frames already assigned above.
        # self.all_user_feats = all_user_feats.drop_duplicates(subset=[user_id])
        # self.all_item_feats = all_item_feats.drop_duplicates(subset=[item_id])
        self.all_user_feats = all_user_feats
        self.all_item_feats = all_item_feats
        self.user_id_df = self.all_user_feats[user_id]
        self.item_id_df = self.all_item_feats[item_id]

        self.user_sparse_features = user_sparse_features
        self.user_dense_features = user_dense_features
        self.item_sparse_features = item_sparse_features
        self.item_dense_features = item_dense_features

    def preprocess_features(self):
        """
        LabelEncode sparse features and MinMax-scale dense features, then
        persist the fitted transformers to disk.

        Implementation notes:
        1. To avoid the "y contains previously unseen labels" error, each
           encoder is fit on the union of values observed in all_user_feats /
           all_item_feats and in train/val/test, plus -2 (padding/unknown),
           BEFORE any transform — so no value can be "unseen".
        2. The fitted encoders are then applied to every table that needs them.
        """
        sparse_labelencoder = {}
        # ========== LabelEncode the sparse features ==========
        for feat in self.sparse_features:
            lbe = LabelEncoder()
            # sparse_labelencoder[feat] = lbe
            user_col_data = pd.Series([], dtype=object)  # empty by default; filled if this is a user feature
            item_col_data = pd.Series([], dtype=object)  # empty by default; filled if this is an item feature
            # If the feature belongs to the user side, take it from all_user_feats.
            if feat in self.user_sparse_features:
                user_col_data = self.all_user_feats[feat]
            
            # If the feature belongs to the item side, take it from all_item_feats.
            if feat in self.item_sparse_features:
                item_col_data = self.all_item_feats[feat]

            # Also merge in every value that appears in train, val and test.
            train_col_data = self.train[feat]
            val_col_data = self.val[feat]
            test_col_data =self.test[feat]
            # Concatenate everything and deduplicate.
            combined_data = pd.concat(
                [user_col_data, item_col_data, train_col_data, val_col_data, test_col_data],
                ignore_index=True
            ).unique().tolist()
            # Add -2 as the padding / unknown token.
            combined_data.append(-2)
            # Fit on the combined value set.
            lbe.fit(combined_data)

            # Transform the full-population tables.
            if feat in self.user_sparse_features:
                my_logger.info("feat in user_sparse_feats")
                # lbe.fit(self.all_user_feats[feat].tolist() + [-2])  # # +[-2] to include the padding token
                self.all_user_feats[feat] = lbe.transform(self.all_user_feats[feat])
            elif feat in self.item_sparse_features:
                my_logger.info("feat in all_item_feats")
                # lbe.fit(self.all_item_feats[feat].tolist() + [-2])
                self.all_item_feats[feat] = lbe.transform(self.all_item_feats[feat])
            # Transform train, val, and test datasets.
            self.train[feat] = lbe.transform(self.train[feat])
            self.val[feat] = lbe.transform(self.val[feat])
            self.test[feat] = lbe.transform(self.test[feat])
            # Keep the fitted encoder so it can be persisted below.
            sparse_labelencoder[feat] = lbe
        # ========== MinMax-scale the dense features ==========
        dense_minmaxscaler = {
            "user_dense_features": MinMaxScaler(feature_range=(0, 1)),
            "item_dense_features": MinMaxScaler(feature_range=(0, 1))
        }
        # Handle user_dense_features and item_dense_features separately.
        for idx, feats in enumerate([self.user_dense_features, self.item_dense_features]):
            if idx == 0:
                # User side.
                mms = dense_minmaxscaler["user_dense_features"]
                # Fit on these features over all_user_feats.
                mms.fit(self.all_user_feats[feats])
                # Transform all_user_feats.
                self.all_user_feats[feats] = mms.transform(self.all_user_feats[feats])
            else:
                # Item side.
                mms = dense_minmaxscaler["item_dense_features"]
                # Fit on these features over all_item_feats.
                mms.fit(self.all_item_feats[feats])
                # Transform all_item_feats.
                self.all_item_feats[feats] = mms.transform(self.all_item_feats[feats])
            # Apply the same transform to train, val and test.
            self.train[feats] = mms.transform(self.train[feats])
            self.val[feats] = mms.transform(self.val[feats])
            self.test[feats] = mms.transform(self.test[feats])

        self.sparse_labelencoder = sparse_labelencoder
        self.dense_minmaxscaler = dense_minmaxscaler

        # Persist the fitted transformers.
        transformer_dir = self.data_config['feature_transformer_dir']
        if not os.path.exists(transformer_dir):
            os.makedirs(transformer_dir)
        # Save sparse_labelencoder.
        with open(os.path.join(transformer_dir, 'sparse_labelencoder.pkl'), 'wb') as f:
            pickle.dump(sparse_labelencoder, f)
        # Save dense_minmaxscaler.
        with open(os.path.join(transformer_dir, 'dense_minmaxscaler.pkl'), 'wb') as f:
            pickle.dump(dense_minmaxscaler, f)

    def process_sequence_features(self):
        """
        Process the sequence features (user_hist and tag): build the token
        mapping dictionaries, encode and pad the sequences, and persist the
        mappings to disk.
        """
        self.user_key2index, train_user_hist, user_maxlen = self.get_var_feature(self.train, 'user_hist')
        self.train_user_hist = train_user_hist
        self.user_maxlen = user_maxlen
        self.val_user_hist = self.get_test_var_feature(self.val, 'user_hist', self.user_key2index, user_maxlen)
        self.new_user_hist = self.get_test_var_feature(self.all_user_feats, 'user_hist', self.user_key2index, 50)

        # Maximum tag sequence length is set to 3 here (tunable).
        tag_max = 3
        self.tag_key2index, train_tag, tag_maxlen = self.get_var_feature(self.train, 'tag', max_len=tag_max)
        self.train_tag = train_tag
        self.tag_maxlen = tag_maxlen
        self.val_tag = self.get_test_var_feature(self.val, 'tag', self.tag_key2index, tag_maxlen)
        self.new_tag = self.get_test_var_feature(self.all_item_feats, 'tag', self.tag_key2index, tag_maxlen)

        # Persist the mapping dictionaries.
        user_key2index_dir = self.data_config['user_key2index']
        if not os.path.exists(user_key2index_dir):
            os.makedirs(user_key2index_dir)
        with open(os.path.join(user_key2index_dir, 'user_key2index.json'), 'w') as f:
            json.dump(self.user_key2index, f)

        tag_key2index_dir = self.data_config['tag_key2index']
        if not os.path.exists(tag_key2index_dir):
            os.makedirs(tag_key2index_dir)
        with open(os.path.join(tag_key2index_dir, 'tag_key2index.json'), 'w') as f:
            json.dump(self.tag_key2index, f)

    def build_feature_columns(self):
        """
        Build the model feature columns from the preprocessed data: sparse,
        dense, and variable-length sequence features.

        NOTE(review): the var-len vocabulary sizes (2000 / 1000) are fixed
        constants — confirm they exceed len(user_key2index)/len(tag_key2index).
        """
        self.user_feature_columns = [
            SparseFeat(feat, self.all_user_feats[feat].nunique() + 10, embedding_dim=4)
            for feat in self.user_sparse_features
        ] + [DenseFeat(feat, 1) for feat in self.user_dense_features]

        self.item_feature_columns = [
            SparseFeat(feat, self.all_item_feats[feat].nunique() + 10, embedding_dim=4, use_hash=True)
            for feat in self.item_sparse_features
        ] + [DenseFeat(feat, 1) for feat in self.item_dense_features]

        user_varlen = [VarLenSparseFeat(SparseFeat('user_hist', vocabulary_size=2000, embedding_dim=4),
                                        maxlen=self.user_maxlen, combiner='mean')]
        item_varlen = [VarLenSparseFeat(SparseFeat('tag', vocabulary_size=1000, embedding_dim=4),
                                        maxlen=self.tag_maxlen, combiner='mean')]
        self.user_feature_columns += user_varlen
        self.item_feature_columns += item_varlen

    def prepare_model_input(self):
        """
        Build the model input dicts for the train, validation and test phases.
        """
        self.train_model_input = {name: self.train[name] for name in self.sparse_features + self.dense_features}
        self.train_model_input["user_hist"] = self.train_user_hist
        self.train_model_input["tag"] = self.train_tag

        self.val_model_input = {name: self.val[name] for name in self.sparse_features + self.dense_features}
        self.val_model_input["user_hist"] = self.val_user_hist
        self.val_model_input["tag"] = self.val_tag

        self.test_model_input = {name: self.test[name] for name in self.sparse_features + self.dense_features}
        test_user_hist = self.get_test_var_feature(self.test, 'user_hist', self.user_key2index, self.user_maxlen)
        test_tag = self.get_test_var_feature(self.test, 'tag', self.tag_key2index, self.tag_maxlen)
        self.test_model_input["user_hist"] = test_user_hist
        self.test_model_input["tag"] = test_tag

    def build_model(self):
        """
        Build and compile the DSSM model (binary classification task).
        """
        self.model = DSSM(self.user_feature_columns, self.item_feature_columns, task='binary')
        self.model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])
        self.model.summary()

    def train_model(self, epochs=10, batch_size=128):
        """
        Train the model, validating against the validation split each epoch.
        """
        self.history = self.model.fit(
            self.train_model_input,
            self.train['target'].values,
            batch_size=batch_size,
            epochs=epochs,
            verbose=1,
            validation_data=(self.val_model_input, self.val['target'].values)
        )
    def save_model(self):
        # Save the trained model to the configured directory; failures are
        # logged rather than raised so the rest of the pipeline can continue.
        try:
            model_save_path = self.data_config['model_dir']
            self.model.save(model_save_path)
        except Exception as e:
            my_logger.error(f"保存模型失败：{e}")


    def evaluate(self):
        """
        Evaluate the model on the test split and log LogLoss and AUC.

        Returns:
            (test_logloss, test_auc) rounded to 4 decimals.
        """
        pred_ans = self.model.predict(self.test_model_input, batch_size=128)
        test_logloss = round(log_loss(self.test['target'].values, pred_ans), 4)
        test_auc = round(roc_auc_score(self.test['target'].values, pred_ans), 4)
        my_logger.info(f"Test LogLoss:{test_logloss}")
        my_logger.info(f"Test AUC:{test_auc}")
        return test_logloss, test_auc

    def extract_embeddings(self):
        """
        Extract the user-tower and item-tower embeddings from the trained
        model for the full user/item population.
        """
        # Collect the input tensors belonging to each tower by name.
        # NOTE(review): in TF2, Keras input tensor `.name` can carry an op
        # suffix such as ':0'; confirm these membership checks actually match,
        # otherwise the sub-model inputs would be empty.
        user_model_input = []
        for input_tensor in self.model.input:
            if input_tensor.name in self.user_sparse_features + self.user_dense_features + ["user_hist"]:
                user_model_input.append(input_tensor)
        item_model_input = []
        for input_tensor in self.model.input:
            if input_tensor.name in self.item_sparse_features + self.item_dense_features + ['tag']:
                item_model_input.append(input_tensor)

        self.user_embedding_model = Model(
            inputs=user_model_input,
            outputs=self.model.get_layer("user_embedding").output
        )
        self.item_embedding_model = Model(
            inputs=item_model_input,
            outputs=self.model.get_layer("item_embedding").output
        )

        user_tower_data_input = {feat: self.all_user_feats[feat] for feat in self.user_sparse_features + self.user_dense_features}
        user_tower_data_input["user_hist"] = self.new_user_hist

        item_tower_data_input = {feat: self.all_item_feats[feat] for feat in self.item_sparse_features + self.item_dense_features}
        item_tower_data_input["tag"] = self.new_tag

        self.user_embedding = self.user_embedding_model.predict(user_tower_data_input)
        self.item_embedding = self.item_embedding_model.predict(item_tower_data_input)
        my_logger.info(f"Item embedding shape:{self.item_embedding.shape}")

    def build_faiss_index(self):
        """
        Build a faiss IVF approximate index over the (L2-normalized) item
        embeddings, then save the index file and the user embeddings.
        """
        d = 128  # embedding dimensionality
        # NOTE(review): d is hard-coded — confirm it equals
        # self.item_embedding.shape[1]; faiss raises on a mismatch.
        nlist = 10  # number of IVF clusters
        index_type = f'IVF{nlist},Flat'
        # Inner product on L2-normalized vectors == cosine similarity.
        metric_type = faiss.METRIC_INNER_PRODUCT
        faiss.normalize_L2(self.item_embedding)
        self.item_index = faiss.index_factory(d, index_type, metric_type)
        self.item_index.train(self.item_embedding)
        # NOTE(review): add_with_ids requires int64 ids — verify the dtype of
        # self.item_id_df.to_numpy().
        self.item_index.add_with_ids(self.item_embedding, self.item_id_df.to_numpy())
        embedding_dir = self.data_config['embedding_saved_dir']
        if not os.path.exists(embedding_dir):
            os.makedirs(embedding_dir)
        faiss.write_index(self.item_index, os.path.join(embedding_dir, "item_embedding.index"))
        np.save(os.path.join(embedding_dir, "user_embedding.npy"), self.user_embedding)

    def search(self, k=3):
        """
        Search the faiss index and return each user's Top-k similar items with
        their scores.

        Returns:
            (D, I): score matrix and item-id matrix, each of shape (n_users, k).
        """
        D, I = self.item_index.search(self.user_embedding, k)
        # NOTE(review): the two log lines slice differently (first 5 ids vs
        # last 5 distance rows) — presumably intentional spot checks.
        my_logger.info(f"Nearest vector ids:{I[:5]}")
        my_logger.info(f"Metric distances:{D[-5:]}")
        return D, I

    def run(self):
        """
        Run the whole pipeline: data preprocessing, feature processing, model
        training, evaluation, embedding extraction and index construction.
        """
        start_time = datetime.datetime.now()
        my_logger.info(f"Start time:{start_time.strftime('%Y-%m-%d %H:%M:%S')}")
        my_logger.info(f"Start to process institution: {self.group_name}...")
        self.data_process()
        self.preprocess_features()
        self.process_sequence_features()
        self.build_feature_columns()
        self.prepare_model_input()
        self.build_model()
        self.train_model()
        self.save_model()
        self.evaluate()
        self.extract_embeddings()
        self.build_faiss_index()
        self.search()

        end_time = datetime.datetime.now()
        my_logger.info(f"End time:{end_time.strftime('%Y-%m-%d %H:%M:%S')}")
        my_logger.info(f"Total duration:{end_time - start_time}")
        my_logger.info(f"Finished to process institution: {self.group_name}...")


if __name__ == "__main__":
    # Expect exactly two positional arguments: the config path and the group name.
    args = sys.argv[1:]
    if len(args) != 2:
        print("Usage: python run_complete_class.py [config_path] [group_name]")
        sys.exit(1)

    config_path, group_name = args
    RecSysEngine(config_path, group_name).run()
