import numpy as np
import pandas as pd
import json  
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.metrics import AUC 
from models.dssm import DSSM
from processing.feature_column import SparseFeat, get_feature_names, VarLenSparseFeat, DenseFeat
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from utils import Negative_Sample
import os
import pickle
os.environ['CUDA_VISIBLE_DEVICES'] = '5'  

import datetime
import pymysql
import random



from sklearn.model_selection import train_test_split  
def get_train_val_test_data(conn, groupName, data_config, user_maxlen=50):
    """Build negative-sampled train/val/test CSVs from the MySQL tables.

    Fetches all user and item rows, truncates each user's '|'-separated
    history to the last ``user_maxlen`` items, splits users 80/10/10,
    and for every positive interaction emits one positive and four
    sampled-negative rows, written to
    ``data_config['saved_processed_data_path']``.

    Args:
        conn: open pymysql connection. (The parameter was previously
            misspelled ``coon`` and the body silently relied on the
            global ``conn`` created in ``__main__`` — fixed.)
        groupName: table-name prefix, e.g. ``'GZU'``.
        data_config: parsed JSON config describing user/item columns.
        user_maxlen: maximum history length kept per user (previously a
            hard-coded 50; the default preserves the old behavior).
    """
    # --- fetch all user rows -------------------------------------------------
    keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
    keys_to_fetch.append('user_hist')
    sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {groupName}_all_user_data"
    cursor = conn.cursor()
    cursor.execute(sql_query)
    results = cursor.fetchall()
    user_df = pd.DataFrame(results, columns=keys_to_fetch)

    # Keep only each user's most recent `user_maxlen` history items.
    user_df['user_hist'] = (user_df['user_hist'].str.strip('|')
                            .str.split('|').str[-user_maxlen:].str.join('|'))

    # 80% train, 10% val, 10% test — the split is over users, so every
    # user's interactions land entirely in one split.
    train_df, temp_df = train_test_split(user_df, test_size=0.2, random_state=42)
    val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)
    del temp_df

    # --- fetch all item rows (candidate pool for negative sampling) ----------
    keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
    sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {groupName}_all_item_data"
    cursor = conn.cursor()
    cursor.execute(sql_query)
    results = cursor.fetchall()
    item_df = pd.DataFrame(results, columns=keys_to_fetch)
    unique_item_ids = item_df['ITEM_ID'].unique().tolist()
    len_item = len(unique_item_ids)

    if not os.path.exists(data_config['saved_processed_data_path']):
        os.makedirs(data_config['saved_processed_data_path'])

    splits = [(train_df, '/train_data.csv'),
              (val_df, '/val_data.csv'),
              (test_df, '/test_data.csv')]
    for split_df, suffix in splits:
        # NOTE: iterating (frame, filename) pairs also removes the old
        # bug where the inner `for i in range(1, len(pos_list))` loop
        # shadowed the outer split-selection index `i`.
        f_name = data_config['saved_processed_data_path'] + suffix
        print(f"train.shape : {split_df.shape}")

        train_set = []
        for userID, hist in zip(split_df['USER_ID'], split_df['user_hist']):
            pos_list = [int(x) for x in hist.split('|') if x]

            def get_neg():
                # Rejection-sample an item the user has NOT interacted with.
                # NOTE(review): loops forever if a user has interacted
                # with every item — acceptable only if that is impossible.
                neg = pos_list[0]
                while neg in pos_list:
                    neg = unique_item_ids[random.randrange(len_item)]
                return neg

            # Four independent negative lists -> a 1:4 pos/neg ratio.
            neg_lists = [[get_neg() for _ in range(len(pos_list))] for _ in range(4)]

            for pos_idx in range(1, len(pos_list)):
                # History is everything before this position, capped at
                # user_maxlen items. (The old `if i != len(pos_list)`
                # guard was always true inside this range and is removed.)
                hist_prefix = pos_list[:pos_idx][-user_maxlen:]
                hist_str = '|'.join(map(str, hist_prefix))
                train_set.append((userID, hist_str, pos_list[pos_idx], 1))
                for neg_list in neg_lists:
                    train_set.append((userID, hist_str, neg_list[pos_idx], 0))

        random.shuffle(train_set)
        train_set_df = pd.DataFrame(train_set)
        train_set_df.columns = ['USER_ID', 'user_hist', 'ITEM_ID', 'target']
        # Re-attach the full user/item side features to every sample.
        train_set_df = pd.merge(train_set_df, user_df.drop(columns=['user_hist']), on='USER_ID', how='left')
        train_set_df = pd.merge(train_set_df, item_df, on='ITEM_ID', how='left')
        print(f"train_set_df.shape : {train_set_df.shape}")
        train_set_df.to_csv(f_name, index=False, sep=',', mode='w', header=True)
        print(f"文件已写入 {f_name}")

# Convert '|'-separated sequence strings into fixed-length integer index arrays that deep-learning models can consume.
def get_var_feature(data, col, max_len=50):
    """Encode a '|'-separated sequence column into fixed-length index arrays.

    Builds a token vocabulary on the fly (index 0 is reserved for
    padding, so valid tokens are numbered from 1), maps every token of
    ``data[col]`` to its index, and post-pads/truncates each row to
    ``max_len``.

    Args:
        data: DataFrame holding the sequence column.
        col: name of the '|'-separated string column to encode.
        max_len: fixed output sequence length.

    Returns:
        (key2index, var_feature, max_len) — the token->index mapping,
        a 2-D int array of shape (len(data), max_len), and the length
        actually used (echoed back for downstream calls).
    """
    key2index = {}

    def split(x):
        key_ans = x.split('|')
        for key in key_ans:
            if key not in key2index:
                # 0 is the special padding value, so valid tokens start at 1.
                key2index[key] = len(key2index) + 1
        return [key2index[key] for key in key_ans]

    var_feature = list(map(split, data[col].values))
    # (A previously computed per-row length array was unused and removed.)
    var_feature = pad_sequences(var_feature, maxlen=max_len, padding='post')
    return key2index, var_feature, max_len


def get_test_var_feature(data, col, key2index, max_len = 50):
    """Encode ``data[col]`` with an existing vocabulary, extending it in place.

    Any token not yet present in ``key2index`` is appended with the next
    1-based index (0 stays reserved for padding); every row is then
    post-padded/truncated to ``max_len``.

    Args:
        data: DataFrame holding the sequence column.
        col: name of the '|'-separated string column to encode.
        key2index: token->index mapping to reuse; mutated in place.
        max_len: fixed output sequence length.

    Returns:
        2-D int array of shape (len(data), max_len).
    """
    def encode(raw):
        tokens = raw.split('|')
        for token in tokens:
            if token not in key2index:
                # 0 is the padding value, so new tokens get the next 1-based id.
                key2index[token] = len(key2index) + 1
        return [key2index[token] for token in tokens]

    test_hist = [encode(row) for row in data[col].values]
    test_hist = pad_sequences(test_hist, maxlen=max_len, padding='post')
    return test_hist


def data_process(conn,groupName,data_config_path,user_maxlen=50):
    """Build processed train/val/test frames plus full user/item feature tables.

    Pipeline:
      1. Load the JSON config and regenerate the negative-sampled
         train/val/test CSVs via ``get_train_val_test_data``, then read
         them back with pandas.
      2. Classify each configured user/item column as sparse
         ("IdFeature") or dense ("DenseFeature"); fill NaNs with -2
         (sparse) / 0 (dense) and coerce non-numeric dense columns to float.
      3. Re-read the complete user/item tables from MySQL with the same
         NaN handling, for later use as tower inputs.

    Returns:
        (data_config, train, val, test, sparse_features, dense_features,
         user_sparse_features, user_dense_features,
         item_sparse_features, item_dense_features, user_df, item_df)

    NOTE(review): ``user_maxlen`` is accepted but never used — the
    history truncation below hard-codes 50; confirm intent.
    """

    with open(data_config_path, 'r') as file:
        data_config = json.load(file)
    # Rebuilds (and overwrites) the processed CSVs on every call.
    get_train_val_test_data(conn,groupName,data_config)
    # Debug option: load only a prefix of each split.
    # train = pd.read_csv(data_config['saved_processed_data_path'] + '/train_data.csv',nrows=20000)
    # val = pd.read_csv(data_config['saved_processed_data_path'] + '/val_data.csv',nrows=5000)
    # test = pd.read_csv(data_config['saved_processed_data_path'] + '/test_data.csv',nrows=5000)
    train = pd.read_csv(data_config['saved_processed_data_path'] + '/train_data.csv')
    val = pd.read_csv(data_config['saved_processed_data_path'] + '/val_data.csv')
    test = pd.read_csv(data_config['saved_processed_data_path'] + '/test_data.csv')

    # train = pd.concat([train, test], ignore_index=True) 

    user_sparse_features = [] 
    user_dense_features = [] 
    item_sparse_features = []  
    item_dense_features = [] 

    # NOTE(review): feats_name_lt is built from the very entries iterated
    # below, so the "not in" checks can never trigger a skip.
    feats_name_lt = [item['input_name'] for item in data_config["data_config"]["user_data"]]+[item['input_name'] for item in data_config["data_config"]["item_data"]]
    for user_fea in data_config['data_config']['user_data']:
        if user_fea["input_name"] not in feats_name_lt:
            continue
        if user_fea["feature_type"] == "IdFeature":
            user_sparse_features.append(user_fea["input_name"])
        elif user_fea["feature_type"] == "DenseFeature":
            user_dense_features.append(user_fea["input_name"])
            # Dense columns whose declared type is not INT*/FLOAT* are
            # NaN-filled with 0 and coerced to float in all three splits.
            if not user_fea["input_type"].startswith("INT") and not user_fea["input_type"].startswith("FLOAT"):
                for df_data in [train, val, test]:
                    df_data[user_fea["input_name"]] = df_data[user_fea["input_name"]].fillna(0)
                    df_data[user_fea["input_name"]] = df_data[user_fea["input_name"]].astype(float) 
    
    # Same classification/coercion for the item-side columns.
    for item_fea in data_config['data_config']['item_data']:
        if item_fea["input_name"] not in feats_name_lt:
            continue
        if item_fea["feature_type"] == "IdFeature":
            item_sparse_features.append(item_fea["input_name"])
        elif item_fea["feature_type"] == "DenseFeature":
            item_dense_features.append(item_fea["input_name"])
            if (not item_fea["input_type"].startswith("INT")) and (not item_fea["input_type"].startswith("FLOAT")):
                for df_data in [train, val, test]:
                    df_data[item_fea["input_name"]] = df_data[item_fea["input_name"]].fillna(0)
                    df_data[item_fea["input_name"]] = df_data[item_fea["input_name"]].astype(float) 

    sparse_features = user_sparse_features + item_sparse_features
    dense_features = user_dense_features + item_dense_features

    # Sentinel fills: -2 marks missing sparse values, 0 missing dense ones.
    # 'tag' exists in the splits via the item-side merge performed when
    # the CSVs were generated; it is treated as a string sequence later.
    for df in [train, val, test]:
        df[sparse_features] = df[sparse_features].fillna(-2)
        df[dense_features] = df[dense_features].fillna(0)
        df['tag'] = df['tag'].astype(str) 
    
    # data = pd.concat([train, val, test], ignore_index=True) 

    # Read the full user and item feature tables (all rows, not only the
    # ones that appear in the training splits).
    print("正在读取所有user和item的数据............")
    keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
    keys_to_fetch.append('user_hist')
    sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {groupName}_all_user_data"
    cursor = conn.cursor()
    cursor.execute(sql_query)
    results = cursor.fetchall()
    user_df = pd.DataFrame(results, columns=keys_to_fetch)
    user_df[user_sparse_features] = user_df[user_sparse_features].fillna(-2)
    user_df[user_dense_features] = user_df[user_dense_features].fillna(0)
    user_df[user_dense_features] = user_df[user_dense_features].astype(float) 
    # Keep only each user's most recent 50 history items (hard-coded 50).
    user_df['user_hist'] = user_df['user_hist'].str.strip('|').str.split('|').str[-50:].str.join('|')

    keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
    sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {groupName}_all_item_data"
    cursor = conn.cursor()
    cursor.execute(sql_query)
    results = cursor.fetchall()
    item_df = pd.DataFrame(results, columns=keys_to_fetch)
    item_df[item_sparse_features] = item_df[item_sparse_features].fillna(-2)
    item_df[item_dense_features] = item_df[item_dense_features].fillna(0)
    item_df[item_dense_features] = item_df[item_dense_features].astype(float) 

    item_df['tag'] = item_df['tag'].astype(str) 
    print("读取完毕！")   


    return data_config, train, val, test, sparse_features, dense_features, \
        user_sparse_features, user_dense_features, \
        item_sparse_features, item_dense_features, \
        user_df, item_df

if __name__ == "__main__":
    import sys
    if len(sys.argv) != 2:
        print("使用方法: python run_complete.py [配置文件路径]")
        sys.exit(1)
    
    config_path = sys.argv[1]

    time1 = datetime.datetime.now()
    print("The start time of the program: ",time1.strftime('%Y-%m-%d %H:%M:%S'))
    # train, val, test, data = data_process()

    conn = pymysql.connect(
        host='10.240.0.8',
        user='lsp_rec_book',
        passwd='WaFQ_CRqZekqzxSKgr3l',
        port=38141,
        db='lsp_rec_book_sql',  # 连接到数据库
        charset='utf8'
    )
    groupName = 'GZU'
    data_config, train, val, test, sparse_features, dense_features, \
        user_sparse_features, user_dense_features, \
        item_sparse_features, item_dense_features, \
        all_user_feats, all_item_feats = data_process(conn,groupName,config_path)
    
    user_id = data_config['data_config']['loan_data']['user_id']['input_name']
    item_id = data_config['data_config']['loan_data']['item_id']['input_name']
    all_user_feats['user_hist'] = all_user_feats['user_hist'].fillna("")    
    
    #endregion

    all_user_feats = all_user_feats.drop_duplicates(subset=[user_id]) 
    all_item_feats = all_item_feats.drop_duplicates(subset=[item_id]) 

    #用户和物品的原始id
    user_id_df = all_user_feats[user_id]
    item_id_df = all_item_feats[item_id]


    # Var-len sequence feature names and the binary label column.
    var_features = ['user_hist']
    target = ['target']

    # One LabelEncoder per sparse feature; one MinMaxScaler per tower's
    # dense-feature group.
    sparse_labelencoder = {f: LabelEncoder() for f in sparse_features}
    dense_minmaxscaler = {
        "user_dense_features": MinMaxScaler(feature_range=(0, 1)),
        "item_dense_features": MinMaxScaler(feature_range=(0, 1))
    }

    import warnings 
    with warnings.catch_warnings():  
        warnings.simplefilter("ignore")
        # 1. Label-encode the sparse features. Each encoder is fitted on
        # the union of values in the full feature table and in
        # train/val/test (plus the NaN sentinel -2), then applied to
        # every frame, so unseen-category errors cannot occur.
        for feat in sparse_features:
            lbe = sparse_labelencoder[feat]

            # Collect every unique value of this feature across all frames.
            if feat in user_sparse_features:
                # User feature: union of all_user_feats and train/val/test.
                unique_values = pd.concat([
                    all_user_feats[feat],
                    train[feat],
                    val[feat],
                    test[feat]
                ]).unique()
            elif feat in item_sparse_features:
                # Item feature: union of all_item_feats and train/val/test.
                unique_values = pd.concat([
                    all_item_feats[feat],
                    train[feat],
                    val[feat],
                    test[feat]
                ]).unique()
            # NOTE(review): if a feature were in neither list,
            # `unique_values` would be unbound here; currently impossible
            # because sparse_features is exactly the union of both lists.
            
            # Make sure the NaN sentinel -2 is encodable.
            if -2 not in unique_values:
                unique_values = np.append(unique_values, -2)
            # Fit on all unique values...
            lbe.fit(unique_values)
            # ...then transform every table.
            if feat in user_sparse_features:
                all_user_feats[feat] = lbe.transform(all_user_feats[feat])
            elif feat in item_sparse_features:
                all_item_feats[feat] = lbe.transform(all_item_feats[feat])
            train[feat] = lbe.transform(train[feat])
            val[feat] = lbe.transform(val[feat])
            test[feat] = lbe.transform(test[feat])
   
        # 2. Min-max scale dense features into [0, 1]. Each scaler is
        # fitted on the full feature table of its tower, then the same
        # fitted scaler transforms train/val/test.
        for index, feat_group in enumerate([user_dense_features, item_dense_features]):
            if feat_group:  # skip empty feature groups
                if index == 0:
                    mms = dense_minmaxscaler["user_dense_features"]
                    mms.fit(all_user_feats[feat_group])
                    all_user_feats[feat_group] = mms.transform(all_user_feats[feat_group])
                elif index == 1:
                    mms = dense_minmaxscaler["item_dense_features"]
                    mms.fit(all_item_feats[feat_group])
                    all_item_feats[feat_group] = mms.transform(all_item_feats[feat_group])
                train[feat_group] = mms.transform(train[feat_group])
                val[feat_group] = mms.transform(val[feat_group])
                test[feat_group] = mms.transform(test[feat_group])
    
    # Persist the fitted transformers so the exact same mappings (e.g.
    # category -> integer id) can be reused at inference/serving time.
    if not os.path.exists(data_config['feature_transformer_dir']):   
        os.makedirs(data_config['feature_transformer_dir']) 
    with open(data_config['feature_transformer_dir'] + '/sparse_labelencoder.pkl', 'wb') as f:  
        pickle.dump(sparse_labelencoder, f)
    with open(data_config['feature_transformer_dir'] + '/dense_minmaxscaler.pkl', 'wb') as f:  
        pickle.dump(dense_minmaxscaler, f)


    # Preprocess the two var-len sequence features: the user's borrowing
    # history (`user_hist`) and the item's unordered tag list (`tag`).
    # Vocabularies are built on train and extended in place when val or
    # the full tables contain unseen tokens.
    user_key2index, train_user_hist, user_maxlen = get_var_feature(train, 'user_hist')
    val_user_hist = get_test_var_feature(val, 'user_hist', user_key2index, user_maxlen)
    new_user_hist = get_test_var_feature(all_user_feats, 'user_hist', user_key2index, 50)

    tag_key2index, train_tag, tag_maxlen = get_var_feature(train, 'tag', max_len = 3)
    val_tag = get_test_var_feature(val, 'tag', tag_key2index, tag_maxlen)
    new_tag = get_test_var_feature(all_item_feats, 'tag', tag_key2index, tag_maxlen)
    

    # Save both token->index vocabularies for reuse at inference time.
    if not os.path.exists(data_config['user_key2index']):   
        os.makedirs(data_config['user_key2index']) 
    with open(data_config['user_key2index'] + '/user_key2index.json', 'w') as f:  
        json.dump(user_key2index, f)
    
    if not os.path.exists(data_config['tag_key2index']):   
        os.makedirs(data_config['tag_key2index']) 
    with open(data_config['tag_key2index'] + '/tag_key2index.json', 'w') as f:  
        json.dump(tag_key2index, f)

    # Feature-column declarations that tell the model how to embed each
    # input. "+10" leaves headroom above the observed nunique().
    user_feature_columns = [SparseFeat(feat, all_user_feats[feat].nunique()+10, embedding_dim=4)
                           for i, feat in enumerate(user_sparse_features)] + [DenseFeat(feat, 1,)
                          for feat in user_dense_features]
    # NOTE(review): only the item tower sets use_hash=True — confirm the
    # asymmetry with the user tower is intentional.
    item_feature_columns = [SparseFeat(feat, all_item_feats[feat].nunique()+10, embedding_dim=4, use_hash=True)
                           for i, feat in enumerate(item_sparse_features)] + [DenseFeat(feat, 1,)
                          for feat in item_dense_features]

    # NOTE(review): vocabulary sizes 2000/1000 are hard-coded; if
    # len(user_key2index) or len(tag_key2index) ever exceeds them, the
    # sequence indices will overflow these embedding tables.
    user_varlen_feature_columns = [VarLenSparseFeat(SparseFeat('user_hist', vocabulary_size=2000,
                                embedding_dim=4), maxlen=user_maxlen, combiner='mean', weight_name=None)] 

    item_varlen_feature_columns = [VarLenSparseFeat(SparseFeat('tag', vocabulary_size=1000,
                                embedding_dim=4), maxlen=tag_maxlen, combiner='mean', weight_name=None)]


    # 3. Generate the input data for the model.
    user_feature_columns += user_varlen_feature_columns
    item_feature_columns += item_varlen_feature_columns


    # Model inputs: one entry per feature name, plus the padded sequences.
    train_model_input = {name:train[name] for name in sparse_features + dense_features}
    train_model_input["user_hist"] = train_user_hist
    train_model_input["tag"] = train_tag



    val_model_input = {name:val[name] for name in sparse_features + dense_features}
    val_model_input["user_hist"] = val_user_hist
    val_model_input["tag"] = val_tag

    # 4. Define the two-tower model, train on train, monitor on val.
    model = DSSM(user_feature_columns, item_feature_columns, task='binary')
    model.summary()

    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'], )


    history = model.fit(train_model_input, train[target].values,
                        batch_size=128, epochs=10, verbose=2, 
                        validation_data=(val_model_input, val[target].values))
    


    # (A long commented-out block that exported a tag->RECORD_ID mapping
    # to CSV was removed here; recover it from version control if needed.)

    print("**************************finish********************************")

    # Persist the full trained model.
    model_save_path = data_config['model_dir']
    model.save(model_save_path)
    # model = load_model(model_save_path)  
    test_user_hist = get_test_var_feature(test, 'user_hist', user_key2index, user_maxlen)

    test_tag = get_test_var_feature(test, 'tag', tag_key2index, tag_maxlen)

    test_model_input = {name : test[name] for name in sparse_features + dense_features}
    test_model_input["user_hist"] = test_user_hist
    test_model_input["tag"] = test_tag


    # Offline evaluation on the held-out test split.
    pred_ans = model.predict(test_model_input, batch_size=128)
    print("test LogLoss", round(log_loss(test[target].values, pred_ans), 4))
    print("test AUC", round(roc_auc_score(test[target].values, pred_ans), 4))


    # Split the trained two-tower model into separate user/item encoders
    # by selecting the Keras inputs that belong to each tower.
    # NOTE(review): matching on `input_tensor.name` assumes the tensor
    # names equal the feature names exactly (no ':0' suffix) — confirm
    # for the TensorFlow version in use.
    # User-tower inputs.
    user_model_input = []
    for input_tensor in model.input:
        if input_tensor.name in user_sparse_features + user_dense_features + ["user_hist"]:  
            user_model_input.append(input_tensor)
    # Item-tower inputs.
    item_model_input = []
    for input_tensor in model.input:
        if input_tensor.name in item_sparse_features + item_dense_features + ['tag']:  
            item_model_input.append(input_tensor)
    # Sub-models that output each tower's final embedding layer.
    user_embedding_model = Model(inputs=user_model_input, outputs=model.get_layer("user_embedding").output)
    item_embedding_model = Model(inputs=item_model_input, outputs=model.get_layer("item_embedding").output)
    
    # User-tower input data: the full, deduplicated user feature table.
    user_tower_data_input = {}
    for user_feat in user_sparse_features + user_dense_features:
        user_tower_data_input[user_feat] = all_user_feats[user_feat]
    user_tower_data_input["user_hist"] = new_user_hist

    # Item-tower input data: the full, deduplicated item feature table.
    item_tower_data_input = {}
    for item_feat in item_sparse_features + item_dense_features:
        item_tower_data_input[item_feat] = all_item_feats[item_feat]
    item_tower_data_input["tag"] = new_tag
    
    # Encode every user and every item into embedding vectors.
    user_embedding = user_embedding_model.predict(user_tower_data_input)
    item_embedding = item_embedding_model.predict(item_tower_data_input)
    print("item embedding shape: ", item_embedding.shape)

    

    print("*****************************************")
    import faiss
    # Basic ANN-index parameters.
    # NOTE(review): d must equal item_embedding.shape[1]; confirm the
    # tower output dimension is actually 128.
    d = 128                                           # vector dimension
    nlist = 10  # number of IVF clusters — a tuning knob, depends on corpus size
    index_type = 'IVF{},Flat'.format(nlist)
    metric_type = faiss.METRIC_INNER_PRODUCT         # inner product on L2-normalized vectors == cosine similarity
    faiss.normalize_L2(item_embedding)
    faiss.normalize_L2(user_embedding)
    
    if not os.path.exists(data_config['embedding_saved_dir']):   
        os.makedirs(data_config['embedding_saved_dir']) 

    # User embeddings are saved raw; no faiss index is built for them
    # (a commented-out user-index block was removed here).
    np.save(data_config['embedding_saved_dir'] + "/user_embedding.npy", user_embedding)

    # Build, train and populate the item index; the ids are the raw item
    # ids, so search results map directly back to items.
    item_index = faiss.index_factory(d,index_type,metric_type)  
    item_index.train(item_embedding)            
    item_index.add_with_ids(item_embedding, item_id_df.to_numpy()) 
    # Persist the item index for the serving side.
    faiss.write_index(item_index, data_config['embedding_saved_dir'] + "/item_embedding.index")

    print('============================== search ==============================')
    # Sanity check: retrieve the top-k items for each user embedding.
    k = 3                       # top K
    D, I = item_index.search(user_embedding, k) 
    print('nearest vector ids:\n',I[:5],'\n')
    print('metric(distances/scores) to query:\n',D[-5:],'\n')



    time2 = datetime.datetime.now()
    print("The ending time of the program:",time2.strftime('%Y-%m-%d %H:%M:%S'))

    print("The program lasted: ", time2-time1)