import numpy as np
import pandas as pd
import json  
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# 下面这个会有报错，终端输入：export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python即可解决
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.metrics import AUC 
from models.dssm import DSSM
from processing.feature_column import SparseFeat, get_feature_names, VarLenSparseFeat, DenseFeat
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from utils import Negative_Sample
import os
import pickle
import pymysql
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  
pd.set_option('display.max_colwidth', None)  # None 表示不限制列宽
pd.set_option('display.max_columns', None)
import sys, datetime
sys.path.append("../")
from logs.log import setup_custom_logger

# Logging system initialisation: one log file per day under LOG_DIR.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24  # NOTE(review): defined but never used in this file
LOG_DIR = '../Logfiles'
# Daily log file named YYYY-MM-DD.log.
file_name = str(datetime.datetime.now().strftime('%Y-%m-%d'))
my_logger = setup_custom_logger(os.path.join(LOG_DIR, '%s.log' % file_name), log_level="INFO")

def encode_df(data_config, df_to_encode, data_type='user_data'):
    """Encode sparse (label-encoded) and dense (min-max scaled) features in place.

    Args:
        data_config: config dict; 'data_config'[data_type] lists the feature
            specs, 'feature_transformer_dir' holds the pickled transformers
            fitted at training time.
        df_to_encode: DataFrame with the raw feature columns; modified in place.
        data_type: 'user_data' or 'item_data'.

    Returns:
        (df_to_encode, feature_name_list) on success; None for an unsupported
        data_type (mirrors the original error branch, which was unreachable
        because an invalid data_type raised a KeyError before it was checked).
    """
    # Validate up front — the original only checked data_type after all the
    # transform work, by which point bad input had already blown up.
    if data_type not in ('user_data', 'item_data'):
        my_logger.error("data_type error!!!")
        return

    # Split configured features into sparse (id-like) and dense groups.
    sparse_features = []
    dense_features = []
    for feature in data_config['data_config'][data_type]:
        if feature["feature_type"] == "IdFeature":
            sparse_features.append(feature["input_name"])
        elif feature["feature_type"] == "DenseFeature":
            dense_features.append(feature["input_name"])

    my_logger.info("start to encoding...")
    # Load the transformers persisted by the training pipeline.
    with open(data_config['feature_transformer_dir'] + '/sparse_labelencoder.pkl', 'rb') as f:
        sparse_labelencoder = pickle.load(f)
    with open(data_config['feature_transformer_dir'] + '/dense_minmaxscaler.pkl', 'rb') as f:
        dense_minmaxscaler = pickle.load(f)

    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # 1. Label-encode the sparse features. Values unseen at training time
        # are mapped to the sentinel -2 first — this assumes -2 is present in
        # each fitted encoder's classes_ (TODO confirm against training code).
        for feat in sparse_features:
            lbe = sparse_labelencoder[feat]
            is_known = df_to_encode[feat].isin(lbe.classes_)
            df_to_encode.loc[~is_known, feat] = -2
            df_to_encode[feat] = lbe.transform(df_to_encode[feat])

        # 2. Min-max scale dense features with the tower-specific scaler.
        scaler_key = "user_dense_features" if data_type == 'user_data' else "item_dense_features"
        mms = dense_minmaxscaler[scaler_key]
        df_to_encode[dense_features] = mms.transform(df_to_encode[dense_features])
    my_logger.info("encoding success!!!")

    # The variable-length column ('user_hist' / 'tag') is part of the feature
    # name list but is encoded separately by get_test_var_feature.
    seq_col = "user_hist" if data_type == 'user_data' else "tag"
    return df_to_encode, sparse_features + dense_features + [seq_col]

def get_var_feature(data, col):
    """Build a vocabulary for a '|'-separated sequence column and pad it.

    Index 0 is reserved for padding, so valid keys start at 1.

    Returns:
        (key2index, padded_sequences, max_len)
    """
    key2index = {}

    def encode_row(raw):
        tokens = raw.split('|')
        for token in tokens:
            if token not in key2index:
                # 0 is the padding value, so real tokens are numbered from 1.
                key2index[token] = len(key2index) + 1
        return [key2index[token] for token in tokens]

    encoded = [encode_row(raw) for raw in data[col].values]
    lengths = np.array([len(seq) for seq in encoded])
    max_len = max(lengths)
    padded = pad_sequences(encoded, maxlen=max_len, padding='post', )
    return key2index, padded, max_len


def get_test_var_feature(data, col, initial_key2index, max_len):
    """Encode a '|'-separated sequence column against an existing vocabulary.

    Unseen keys are appended to the vocabulary (0 stays reserved for
    padding); every row is padded/truncated to max_len.

    Returns:
        (padded_sequences, updated_key2index)
    """
    key2index = initial_key2index

    def encode_row(raw):
        tokens = raw.split('|')
        for token in tokens:
            if token not in key2index:
                # 0 is the padding value, so real tokens are numbered from 1.
                key2index[token] = len(key2index) + 1
        return [key2index[token] for token in tokens]

    encoded = [encode_row(raw) for raw in data[col].values]
    padded = pad_sequences(encoded, maxlen=max_len, padding='post', )
    return padded, key2index

# The frame returned here is ready to feed straight into the model.
def get_user_df(conn, groupName, data_config, user_maxlen=50):
    """Load, index, and encode every user row for one tenant (groupName).

    Steps:
      1. Fetch the configured user columns plus 'user_hist' from MySQL.
      2. Ensure an 'embedding_index' column exists and rewrite it so that
         row i of the returned frame maps to embedding index i.
      3. Truncate each user's history to the last 50 items and encode it
         against the persisted user vocabulary (padded to user_maxlen).
      4. Label/min-max encode the remaining features via encode_df.

    Returns:
        (user_df, feature_name_list, padded_user_hist)
    """
    keys_to_fetch = [item['input_name'] for item in data_config['data_config']['user_data']]
    keys_to_fetch.append('user_hist')
    sql_query = f"SELECT {', '.join(keys_to_fetch)} FROM {groupName}_all_user_data"
    cursor = conn.cursor()
    cursor.execute(sql_query)
    user_df = pd.DataFrame(cursor.fetchall(), columns=keys_to_fetch)

    # 1. Maintain embedding_index so a USER_ID can later be mapped back to
    # its row in the embedding matrix.
    cursor.execute(f"SHOW COLUMNS FROM {groupName}_all_user_data LIKE 'embedding_index';")
    if not cursor.fetchone():
        # Column does not exist yet — add it.
        cursor.execute(
            f"ALTER TABLE {groupName}_all_user_data ADD COLUMN embedding_index INT AFTER UPDATE_DATE"
        )
    user_id_list = user_df['USER_ID'].tolist()
    # Batch all updates in one executemany instead of a round trip per user.
    update_sql = f"UPDATE {groupName}_all_user_data SET embedding_index = %s WHERE USER_ID = %s"
    cursor.executemany(update_sql, list(enumerate(user_id_list)))
    conn.commit()
    print("user_id对应的索引数据更新成功")

    pd.set_option('display.max_colwidth', None)
    # 2. Behaviour sequence: strip surrounding '|', split, keep the most
    # recent 50 items, then re-join with '|'.
    user_df['user_hist'] = user_df['user_hist'].str.strip('|').str.split('|').str[-50:].str.join('|')

    # 3. Load (or initialise) the persisted user vocabulary, encode the
    # histories (0-padded up to user_maxlen), then persist the possibly
    # extended vocabulary.
    os.makedirs(data_config['user_key2index'], exist_ok=True)
    file_path = os.path.join(data_config['user_key2index'], 'user_key2index.json')
    if os.path.exists(file_path):
        with open(file_path, 'r') as f:
            user_key2index = json.load(f)
    else:
        user_key2index = {}
    new_user_hist, user_key2index = get_test_var_feature(user_df, 'user_hist', user_key2index, user_maxlen)
    with open(file_path, 'w') as f:
        json.dump(user_key2index, f)

    # 4. Encode sparse/dense user features.
    user_df, all_features = encode_df(data_config, user_df, 'user_data')

    return user_df, all_features, new_user_hist


def get_item_df(conn, groupName, data_config, item_maxlen=3):
    """Load and encode all item rows, and rebuild the tag->record_id index.

    Besides encoding the configured item features, this inverts the
    item->tags mapping into tag -> "id1|id2|..." and persists it both as a
    CSV (data_config['tag2recordids_path']) and as the MySQL table
    {groupName}_tag2recordId (dropped and recreated on every run).

    Returns:
        (item_df, feature_name_list, padded_tags, item_id_list)
    """
    keys_to_fetch = [item['input_name'] for item in data_config['data_config']['item_data']]
    sql_query = f"SELECT * FROM {groupName}_all_item_data"
    all_item_df = pd.read_sql_query(sql_query, conn)
    item_id_list = all_item_df['ITEM_ID'].tolist()
    item_df = all_item_df[keys_to_fetch]

    # TODO: the daily item refresh recomputes everything; old item sequences
    # never change, so consider incremental updates to save resources.

    # Invert  item1 -> "tag1|tag2|tag3"  into  tag1 -> "item1|item2|...".
    temp_df = all_item_df[['RECORD_ID', 'tag']]
    del all_item_df
    temp_df = temp_df.drop_duplicates()
    temp_df['RECORD_ID'] = temp_df['RECORD_ID'].astype(int)
    temp_df['tag'] = temp_df['tag'].astype(str)

    # defaultdict + itertuples replaces the much slower iterrows loop while
    # keeping the same insertion order.
    from collections import defaultdict
    tag_to_record_ids = defaultdict(list)
    for record_id, tag_str in temp_df[['RECORD_ID', 'tag']].itertuples(index=False):
        for tag in tag_str.split('|'):
            tag_to_record_ids[tag].append(record_id)

    tag_df = pd.DataFrame(list(tag_to_record_ids.items()), columns=['tag', 'RECORD_ID'])
    tag_df['RECORD_ID'] = tag_df['RECORD_ID'].apply(lambda x: '|'.join(map(str, x)))
    tag_df.to_csv(data_config['tag2recordids_path'], index=False)

    # Drop and recreate the tag2recordId table, then bulk-insert the mapping.
    with conn.cursor() as cursor:
        cursor.execute(f"DROP TABLE IF EXISTS {groupName}_tag2recordId")
        cursor.execute(f"""
            CREATE TABLE {groupName}_tag2recordId (
                tag VARCHAR(255),
                idList TEXT
            )
        """)
        conn.commit()
        insert_query = f"INSERT INTO {groupName}_tag2recordId (tag, idList) VALUES (%s, %s)"
        cursor.executemany(insert_query, tag_df.itertuples(index=False))
        print(f"Table {groupName}_tag2recordId created and data inserted")
        conn.commit()

    # Load (or initialise) the persisted tag vocabulary, encode the tags
    # (0-padded up to item_maxlen), then persist the vocabulary.
    os.makedirs(data_config['tag_key2index'], exist_ok=True)
    file_path = os.path.join(data_config['tag_key2index'], 'tag_key2index.json')
    if os.path.exists(file_path):
        with open(file_path, 'r') as f:
            tag_key2index = json.load(f)
    else:
        tag_key2index = {}
    new_tag, tag_key2index = get_test_var_feature(item_df, 'tag', tag_key2index, item_maxlen)
    with open(file_path, 'w') as f:
        json.dump(tag_key2index, f)

    # Encode sparse/dense item features.
    item_df, all_features = encode_df(data_config, item_df, 'item_data')

    return item_df, all_features, new_tag, item_id_list


# def model_predict_source(data_config,df,all_features,new_hist,data_type='user_data'):
#     print("加载模型......")
#     model_save_path = data_config['model_dir']
#     model = load_model(model_save_path) 
#     print("加载完毕......")
#     import faiss
#     # 〇，基本参数设置
#     d = 128                                           # 向量维度
#     nlist = 10  # 聚类的数量，这是一个超参数，需要根据数据集进行调整  
#     index_type = 'IVF{},Flat'.format(nlist)
#     metric_type = faiss.METRIC_INNER_PRODUCT         # 度量(相似度/距离)类型

#     if data_type == 'user_data':
#         user_embedding = update_user_embedding(model,data_config,df,all_features,new_hist)
#         faiss.normalize_L2(user_embedding)
#         np.save(data_config['embedding_saved_dir'] + "/user_embedding.npy", user_embedding)
#         print("用户embedding更新完毕......")
#     elif data_type == 'item_data':

#         item_id_df = df['ITEM_ID']

#         item_embedding = update_item_embedding(model,data_config,df,all_features,new_hist)
#         faiss.normalize_L2(item_embedding)
#         item_index = faiss.index_factory(d,index_type,metric_type) 
#         item_index.train(item_embedding)
#         item_index.add_with_ids(item_embedding, item_id_df.to_numpy()) 
#         faiss.write_index(item_index, data_config['embedding_saved_dir'] + "/item_embedding.index")
#         print("物品embedding更新完毕......")
#     else:
#         print("data_type error")


def load_model_save_path(data_config):
    """Load and return the saved Keras model from data_config['model_dir']."""
    print("加载模型......")
    path = data_config['model_dir']
    print("model_save_path", path)
    loaded = load_model(path)
    print("加载完毕......")
    return loaded

# def predict_save_user_embedding(model,data_config,df,all_features,new_hist):
#     import faiss
#     # 〇，基本参数设置
#     d = 128                                           # 向量维度
#     nlist = 10  # 聚类的数量，这是一个超参数，需要根据数据集进行调整  
#     index_type = 'IVF{},Flat'.format(nlist)
#     metric_type = faiss.METRIC_INNER_PRODUCT         # 度量(相似度/距离)类型
#     user_embedding = update_user_embedding(model,data_config,df,all_features,new_hist)
#     faiss.normalize_L2(user_embedding)
#     np.save(data_config['embedding_saved_dir'] + "/user_embedding.npy", user_embedding)
#     print("用户embedding更新完毕......")

# def predict_save_item_embedding(model,data_config,df,all_features,new_hist):
#     item_id_df = df['ITEM_ID']

#     item_embedding = update_item_embedding(model,data_config,df,all_features,new_hist)

#     from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility, Index


#     connections.connect("default", host="localhost", port="19530")

#     # 定义字段
#     item_id_field = FieldSchema(name="ITEM_ID", dtype=DataType.INT64, is_primary=True, auto_id=False)
#     record_id_field = FieldSchema(name="RECORD_ID", dtype=DataType.INT64, auto_id=False)
#     embedding_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=128)
#     tag_field = FieldSchema(name="tag", dtype=DataType.VARCHAR, auto_id=False, max_length=100)

#     # 定义集合 schema
#     schema = CollectionSchema(fields=[item_id_field, record_id_field, embedding_field, tag_field], description="Collection for item embeddings")
#     collection_name = "CUPL_test"

#     # 检查集合是否存在，如果存在则删除
#     if utility.has_collection(collection_name):
#         utility.drop_collection(collection_name)
#         print(f"Collection {collection_name} already exists and has been dropped.")

#     # 创建集合
#     collection = Collection(name=collection_name, schema=schema)
#     print(f"Create collection {collection_name} successfully")

#     # 假设你已经有了item_id_df和item_embedding
#     item_ids = item_id_df.tolist()
#     record_ids = list(range(1, len(item_ids) + 1))
#     tags = ['历史'] * len(item_ids)   # 所有tag都设置为'历史'

#     # 确保 entities 是一个列表的列表，每个子列表包含一个字段的所有值
#     entities = [
#         item_ids,
#         record_ids,
#         item_embedding.tolist(),
#         tags
#     ]
#     # 插入数据
#     insert_result = collection.insert(entities)
#     collection.flush()

#     # 创建索引
#     index = {
#         "index_type": "IVF_FLAT",
#         "metric_type": "L2",
#         "params": {"nlist": 128},
#     }
#     collection.create_index(field_name="embedding", index_params=index)
#     print("物品embedding更新完毕......")

from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility, Index

def create_collection(collection_name, collection_type="USER_ID"):
    """Create a Milvus collection holding (id, 128-d embedding) pairs.

    Args:
        collection_name: name of the collection to create.
        collection_type: name of the INT64 primary-key field
            ("USER_ID" or "ITEM_ID").

    Returns:
        The newly created Collection. (The original built it but never
        returned it, so callers assigning the result silently got None.)
    """
    # Schema: INT64 primary key (no auto-id) + fixed 128-d float vector.
    id_field = FieldSchema(name=collection_type, dtype=DataType.INT64, is_primary=True, auto_id=False)
    embedding_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=128)
    schema = CollectionSchema(fields=[id_field, embedding_field],
                              description="Collection for item embeddings")
    collection = Collection(name=collection_name, schema=schema)
    print(f"Create collection {collection_name} successfully")
    return collection

def write_to_milvus(collection_name, entities, collection_type="USER_ID"):
    """Insert entities into a Milvus collection, creating it if absent.

    Args:
        collection_name: target collection.
        entities: column-major data: [ids, embeddings].
        collection_type: primary-key field name used when the collection
            must be created ("USER_ID" or "ITEM_ID").
    """
    connections.connect("default", host="10.240.1.3", port="19530")
    # Create the collection on first use; the handle is fetched fresh below
    # regardless of create_collection's return value (original code assigned
    # it to a variable that was immediately overwritten).
    if not utility.has_collection(collection_name):
        create_collection(collection_name, collection_type)

    collection = Collection(name=collection_name)
    collection.insert(entities)
    collection.flush()

    # (Re)build the vector index after each batch of inserts.
    index_params = {
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 128},
    }
    collection.create_index(field_name="embedding", index_params=index_params)

def _insert_embeddings(ids, embeddings, batch_size, collection_name, collection_type, desc):
    """Write (id, embedding) pairs to Milvus in batches with a progress bar.

    Stops at the first failed batch after printing the error, matching the
    original break-on-error behaviour of both the user and item branches.
    """
    from tqdm import tqdm
    with tqdm(total=len(ids), desc=desc, unit="batch") as pbar:
        for i in range(0, len(ids), batch_size):
            batch_entities = [ids[i:i + batch_size], embeddings[i:i + batch_size]]
            try:
                write_to_milvus(collection_name, batch_entities, collection_type=collection_type)
                # Advance by the actual batch length so the final partial
                # batch does not overshoot the total.
                pbar.update(len(batch_entities[0]))
            except Exception as e:
                print(f"Error inserting batch {i // batch_size + 1}: {e}")
                break


def model_predict(data_config, df, all_features, new_hist, data_type='user_data', collection_name='test', item_id_list=None):
    """Compute tower embeddings for users or items and write them to Milvus.

    Args:
        data_config: config dict; 'model_dir' locates the saved model.
        df: encoded feature frame from get_user_df / get_item_df.
        all_features: feature column names to feed the tower.
        new_hist: padded sequence feature ('user_hist' or 'tag').
        data_type: 'user_data' or 'item_data'.
        collection_name: tenant name; '_user' / '_item' suffix is appended.
        item_id_list: real ITEM_IDs (item branch only).
    """
    print("加载模型......")
    model_save_path = data_config['model_dir']
    print("model_save_path", model_save_path)
    model = load_model(model_save_path)
    print("加载完毕......")

    if data_type == 'user_data':
        user_embedding = update_user_embedding(model, data_config, df, all_features, new_hist)
        # NOTE: the ids written here are embedding_index positions, not real
        # user ids — df['USER_ID'] is already label-encoded at this point
        # (a deliberate workaround, per the original comment).
        user_ids = list(range(len(df['USER_ID'])))
        _insert_embeddings(user_ids, user_embedding, 2000,
                           collection_name + "_user", "USER_ID", "用户数据写入Milvus")
        print("用户embedding更新完毕......")
    elif data_type == 'item_data':
        item_ids = item_id_list
        print("item ids", item_ids[:20])
        item_embedding = update_item_embedding(model, data_config, df, all_features, new_hist)
        _insert_embeddings(item_ids, item_embedding, 20000,
                           collection_name + "_item", "ITEM_ID", "物品数据写入Milvus")
        print("物品embedding更新完毕......")
    else:
        print("**************************************")
        print("*************data_type error**********")
        print("**************************************")

def update_user_embedding(model, data_config, user_df, all_features, new_user_hist):
    """Run the user tower and return the user embedding matrix.

    Builds a sub-model from the full model's inputs down to its
    "user_embedding" layer, then predicts on the encoded user features plus
    the padded history sequence.
    """
    # Keep only the model inputs that correspond to user-side features.
    tower_inputs = [tensor for tensor in model.input if tensor.name in all_features]
    user_tower = Model(inputs=tower_inputs, outputs=model.get_layer("user_embedding").output)

    feed = {name: user_df[name] for name in all_features}
    feed["user_hist"] = new_user_hist
    user_embedding = user_tower.predict(feed)

    print("user_embedding shape: ", user_embedding.shape)
    return user_embedding
    

def update_item_embedding(model, data_config, item_df, all_features, new_tag):
    """Run the item tower and return the item embedding matrix.

    Builds a sub-model from the full model's inputs down to its
    "item_embedding" layer, then predicts on the encoded item features plus
    the padded tag sequence.
    """
    # Keep only the model inputs that correspond to item-side features.
    tower_inputs = [tensor for tensor in model.input if tensor.name in all_features]
    item_tower = Model(inputs=tower_inputs, outputs=model.get_layer("item_embedding").output)

    feed = {name: item_df[name] for name in all_features}
    feed["tag"] = new_tag
    item_embedding = item_tower.predict(feed)

    print("item_embedding shape: ", item_embedding.shape)
    return item_embedding
    

if __name__ == "__main__":
    # Daily job: for each tenant, refresh user and item embeddings and push
    # them to Milvus.
    # NOTE(security): credentials are hard-coded; move them to environment
    # variables or a secrets store before this leaves a trusted environment.
    conn = pymysql.connect(
        host='10.240.0.8',
        user='lsp_rec_book',
        passwd='WaFQ_CRqZekqzxSKgr3l',
        port=38141,
        db='lsp_rec_book_sql',
        charset='utf8'
    )
    try:
        # Other known tenants: ['NNU', 'XJTLU', 'YBU', 'CUPL', 'HEN', 'CAU']
        for groupName in ["JXSTNU"]:
            data_config_path = f'../configs/{groupName}.json'
            with open(data_config_path, 'r') as file:
                data_config = json.load(file)
            # Make sure the embedding output directory exists.
            os.makedirs(data_config['embedding_saved_dir'], exist_ok=True)

            # Refresh the daily user embeddings.
            data_to_model, all_feats_name, new_hist = get_user_df(conn, groupName, data_config)
            model_predict(data_config, data_to_model, all_feats_name, new_hist,
                          data_type='user_data', collection_name=groupName)

            # Refresh the daily item embeddings.
            data_to_model, all_feats_name, new_tag, item_id_list = get_item_df(conn, groupName, data_config)
            model_predict(data_config, data_to_model, all_feats_name, new_tag,
                          data_type='item_data', collection_name=groupName, item_id_list=item_id_list)
    finally:
        # Always release the DB connection, even if a tenant fails mid-run.
        conn.close()

