import json
import hashlib
import time
import requests
import pymysql
import os
import sqlite3
import pandas as pd
import time
import datetime
from fetch_utils import generate_sign,fetch_loan_history,fetch_user,fetch_item,fetch_book_attr,fetch_metadata,create_sql_database,initial_load
from TagClass import Tag
import sys
sys.path.append("../")
from logs.log import setup_custom_logger

os.environ['CUDA_VISIBLE_DEVICES'] = '0,3,4,5,6,7'

# Logging setup: one log file per calendar day under LOG_DIR.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
LOG_DIR = '../Logfiles'
file_name = datetime.datetime.now().strftime('%Y-%m-%d')
my_logger = setup_custom_logger(os.path.join(LOG_DIR, f'{file_name}.log'), log_level="INFO")

def _load_config(json_file):
    # Load the per-group JSON config holding the fetch cursors and the
    # feature schema ('data_config'); any failure here aborts the whole run.
    try:
        with open(json_file, 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError as e:
        my_logger.error("配置文件 %s 未找到: %s", json_file, e)
        raise
    except json.JSONDecodeError as e:
        my_logger.error("解析配置文件 %s 时出错: %s", json_file, e)
        raise
    except Exception as e:
        my_logger.error("读取配置文件时发生未知错误: %s", e)
        raise


def _save_progress(json_file, data):
    # Persist the fetch cursors (last ids) so an interrupted run can resume.
    try:
        with open(json_file, 'w', encoding='utf-8') as file:
            json.dump(data, file, ensure_ascii=False, indent=4)
    except Exception as e:
        my_logger.error("记录进度时发生错误: %s", e)


def _sync_metadata(base_url, headers, groupName, conn, data, json_file):
    # Page through fetch_metadata and bulk-insert into <group>_metadata.
    error_count = 0
    max_errors = 8
    while True:
        result = None
        try:
            result = fetch_metadata(base_url, headers, data['metadata_last_id'], data['metadata_record_last_id'])
            if not result['success']:
                my_logger.warning("fetch_metadata 请求成功，但返回的结果标志为 False: %s", result)
        except requests.exceptions.RequestException as e:
            my_logger.error("请求 fetch_metadata 时发生网络错误: %s", e)
        except ValueError as e:
            my_logger.error("解析 fetch_metadata 返回的数据时发生错误: %s", e)
            raise
        except KeyError as e:
            my_logger.error("fetch_metadata 返回的数据缺少预期的字段: %s", e)
        except Exception as e:
            my_logger.error("调用 fetch_metadata 时发生未知错误: %s", e)

        # On any failed page, retry (bounded) instead of dereferencing an
        # unbound/stale `result`, which previously caused NameError/KeyError.
        if result is None or not result['success']:
            error_count += 1
            if error_count >= max_errors:
                my_logger.error("已尝试多次网络连接，均失败，故终止程序！！！")
                break
            continue
        error_count = 0

        if result['data']['size'] == 0:
            my_logger.warning("拉取结果时数据size为0")
            break
        # Advance the resume cursor to the last row of this page.
        data['metadata_last_id'], data['metadata_record_last_id'] = result['data']['dataList'][-1]['mdId'], result['data']['dataList'][-1]['recordId']
        try:
            conn.begin()  # explicit transaction around the whole page
            with conn.cursor() as cursor:
                sql = """
                    INSERT INTO metadata (MD_ID, RECORD_ID, FIELD, FIELD_DATA, LIB_CODE, GROUP_CODE, CREATE_BY, CREATE_DATE, UPDATE_BY, UPDATE_DATE) 
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                    """
                # Table names cannot be parameterized; groupName comes from
                # internal config, not user input.
                sql = sql.replace('metadata', f'{groupName}_metadata')
                cursor.executemany(sql, [(item['mdId'], item['recordId'], item['field'], item['fieldData'], item['libCode'], item['groupCode'], item['createBy'], item['createDate'], item['updateBy'], item['updateDate']) for item in result['data']['dataList']])
                conn.commit()
        except pymysql.MySQLError as e:
            conn.rollback()
            my_logger.error("数据库错误，事务已回滚: %s", e)
        except Exception as e:
            conn.rollback()
            my_logger.error("发生未知错误，事务已回滚: %s", e)

        if result['data']['finalRes']:
            break
        _save_progress(json_file, data)
    my_logger.info("metadata更新完毕")
    _save_progress(json_file, data)


def _sync_book_attr(base_url, headers, groupName, conn, data, json_file):
    # Page through fetch_book_attr and bulk-insert into <group>_book_attr.
    error_count = 0
    max_errors = 8
    while True:
        result = None
        try:
            result = fetch_book_attr(base_url, headers, data['record_last_id'])
            if not result['success']:
                my_logger.warning("fetch_book_attr 请求成功，但返回的结果标志为 False: %s", result)
        except requests.exceptions.RequestException as e:
            my_logger.error("请求 fetch_book_attr 时发生网络错误: %s", e)
        except ValueError as e:
            my_logger.error("解析 fetch_book_attr 返回的数据时发生错误: %s", e)
        except KeyError as e:
            my_logger.error("fetch_book_attr 返回的数据缺少预期的字段: %s", e)
        except Exception as e:
            my_logger.error("调用 fetch_book_attr 时发生未知错误: %s", e)

        # Bounded retry on failure (original spun forever / crashed here).
        if result is None or not result['success']:
            error_count += 1
            if error_count >= max_errors:
                my_logger.error("已尝试多次网络连接，均失败，故终止程序！！！")
                break
            continue
        error_count = 0

        if result['data']['size'] == 0:
            break
        data['record_last_id'] = result['data']['dataList'][-1]['recordId']
        try:
            with conn.cursor() as cursor:
                sql = """
                INSERT INTO book_attr (RECORD_ID,CALL_NO,TITLE,TITLE_S,ISBN13,ISBN10,ISBN,AUTHOR,PUBLISHER,PUB_DATE,PUB_YEAR,SERIRL_FLAG,PARTITIONER,VERSION,SERIES,POST_ISSUE_NO,CN_NO,VERSION_EXP,E_ISBN) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
                """
                sql = sql.replace('book_attr', f'{groupName}_book_attr')
                cursor.executemany(sql, [(item["recordId"], item["callNo"], item["title"], item["titleS"], item["isbn13"], item["isbn10"], item["isbn"], item["author"], item["publisher"], item["pubDate"], item["pubYear"], item["serirlFlag"], item["partitioner"], item["version"], item["series"], item["postIssueNo"], item["cnNo"], item["versionExp"], item["eisbn"]) for item in result['data']['dataList']])
                conn.commit()
        except pymysql.MySQLError as e:
            conn.rollback()
            my_logger.error("数据库错误，事务已回滚: %s", e)
        except Exception as e:
            conn.rollback()
            my_logger.error("发生未知错误，事务已回滚: %s", e)

        if result['data']['finalRes']:
            break
        _save_progress(json_file, data)
    my_logger.info("book_attr更新完毕")
    _save_progress(json_file, data)


def _sync_items(base_url, headers, groupName, conn, data, json_file, tagClass):
    # Page through fetch_item, tag each record, and upsert into
    # <group>_all_item_data (delete-then-insert keyed on ITEM_ID).
    error_count = 0
    max_errors = 8
    while True:
        result = None
        try:
            result = fetch_item(base_url, headers, data['item_last_id'])
            if not result['success']:
                my_logger.warning("fetch_item 请求成功，但返回的结果标志为 False: %s", result)
        except requests.exceptions.RequestException as e:
            my_logger.error("请求 fetch_item 时发生网络错误: %s", e)
        except ValueError as e:
            my_logger.error("解析 fetch_item 返回的数据时发生错误: %s", e)
        except KeyError as e:
            my_logger.error("fetch_item 返回的数据缺少预期的字段: %s", e)
        except Exception as e:
            my_logger.error("调用 fetch_item 时发生未知错误: %s", e)

        # Bounded retry on failure (original spun forever / crashed here).
        if result is None or not result['success']:
            error_count += 1
            if error_count >= max_errors:
                my_logger.error("已尝试多次网络连接，均失败，故终止程序！！！")
                break
            continue
        error_count = 0

        if result['data']['size'] == 0:
            break
        data['item_last_id'] = result['data']['dataList'][-1]['itemId']
        try:
            with conn.cursor() as cursor:
                sql = """
                INSERT INTO all_item_data (ITEM_ID,CUR_LOCATION_ID,TOTAL_CIRC_COUNT,GET_MODE_CODE,CIRC_NUMBER,RECORD_ID,UPDATE_DATE,tag) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
                """
                sql = sql.replace('all_item_data', f'{groupName}_all_item_data')
                result_df = pd.json_normalize(result['data']['dataList'])
                record_id_list = result_df["recordId"].to_list()
                # Label every recordId via the Tag helper.
                tag_df = tagClass.add_tag(record_id_list, groupName, cursor)
                # Split configured item features into sparse (id) vs dense ones.
                item_sparse_features = []
                item_dense_features = []
                for item_fea in data['data_config']['item_data']:
                    if item_fea["feature_type"] == "IdFeature":
                        item_sparse_features.append(item_fea["input_name"])
                    elif item_fea["feature_type"] == "DenseFeature":
                        item_dense_features.append(item_fea["input_name"])
                # Merge tags in, rename to DB column names and fill NA values.
                item_df = pd.merge(result_df, tag_df, on='recordId')
                item_df = item_df[['itemId', 'curLocationId', 'totalCircCount', 'getModeCode', 'circNumber', 'recordId', 'updateDate', 'tag']]
                item_df.rename(columns={'itemId': 'ITEM_ID', 'curLocationId': 'CUR_LOCATION_ID', 'totalCircCount': 'TOTAL_CIRC_COUNT', 'getModeCode': 'GET_MODE_CODE', 'circNumber': 'CIRC_NUMBER', 'recordId': 'RECORD_ID', 'updateDate': 'UPDATE_DATE'}, inplace=True)
                item_df[item_sparse_features] = item_df[item_sparse_features].fillna(-2)
                item_df[item_dense_features] = item_df[item_dense_features].fillna(0)
                item_df[item_dense_features] = item_df[item_dense_features].astype(float)
                item_df['tag'] = item_df['tag'].astype(str)
                # Keep only the last occurrence of duplicated items in this page,
                # then delete rows already stored locally so the insert upserts.
                item_df = item_df.drop_duplicates(subset='ITEM_ID', keep='last')
                item_id_list = item_df['ITEM_ID'].to_list()
                sql_search = "DELETE FROM {0}_all_item_data WHERE ITEM_ID IN %s".format(groupName)
                cursor.execute(sql_search, (item_id_list,))  # pymysql expands the list
                data_to_insert = [row for row in item_df.itertuples(index=False)]
                cursor.executemany(sql, data_to_insert)
                conn.commit()
        except pymysql.MySQLError as e:
            conn.rollback()
            my_logger.error("数据库错误，事务已回滚: %s", e)
        except Exception as e:
            conn.rollback()
            my_logger.error("发生未知错误，事务已回滚: %s", e)

        if result['data']['finalRes']:
            break
        _save_progress(json_file, data)
    my_logger.info("all_item_data更新完毕")
    _save_progress(json_file, data)


def _sync_users(base_url, headers, groupName, conn, data, json_file):
    # Page through fetch_user and upsert into <group>_all_user_data
    # (delete-then-insert keyed on USER_ID); user_hist starts empty.
    error_count = 0
    max_errors = 8
    while True:
        result = None
        try:
            result = fetch_user(base_url, headers, data['user_last_id'])
            if not result['success']:
                my_logger.warning("fetch_user 请求成功，但返回的结果标志为 False: %s", result)
        except Exception as e:
            my_logger.error(f"Error occurred while fetching user data: {e}")

        # Bounded retry on failure (original spun forever / crashed here).
        if result is None or not result['success']:
            error_count += 1
            if error_count >= max_errors:
                my_logger.error("已尝试多次网络连接，均失败，故终止程序！！！")
                break
            continue
        error_count = 0

        if result['data']['size'] == 0:
            break
        data['user_last_id'] = result['data']['dataList'][-1]['userId']
        try:
            with conn.cursor() as cursor:
                sql = """
                INSERT INTO all_user_data (USER_ID,GENDER,COLLEGE_YEAR_ID,COLLEGE_DEPT_ID,MAJOR_ID,TOTAL_LEND,RESEARCHER_FLAG,EDU_ID,COLLEGE_CLASS_ID,UPDATE_DATE,user_hist) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
                """
                sql = sql.replace('all_user_data', f'{groupName}_all_user_data')

                result_df = pd.json_normalize(result['data']['dataList'])
                user_df = result_df[['userId', 'gender', 'collegeYearId', 'collegeDeptId', 'majorId', 'totalLend', 'researcherFlag', 'eduId', 'collegeClassId', 'updateDate']]
                # New users start with an empty behaviour history; assign()
                # avoids the SettingWithCopy warning.
                user_df = user_df.assign(user_hist='')
                user_df.columns = ['USER_ID', 'GENDER', 'COLLEGE_YEAR_ID', 'COLLEGE_DEPT_ID', 'MAJOR_ID', 'TOTAL_LEND', 'RESEARCHER_FLAG', 'EDU_ID', 'COLLEGE_CLASS_ID', 'UPDATE_DATE', 'user_hist']

                # Split configured user features into sparse (id) vs dense ones.
                user_sparse_features = []
                user_dense_features = []
                for user_fea in data['data_config']['user_data']:
                    if user_fea["feature_type"] == "IdFeature":
                        user_sparse_features.append(user_fea["input_name"])
                    elif user_fea["feature_type"] == "DenseFeature":
                        user_dense_features.append(user_fea["input_name"])
                user_df[user_sparse_features] = user_df[user_sparse_features].fillna(-2)
                user_df[user_dense_features] = user_df[user_dense_features].fillna(0)
                user_df[user_dense_features] = user_df[user_dense_features].astype(float)

                # Keep only the last occurrence of duplicated users in this page,
                # then delete rows already stored locally so the insert upserts.
                user_df = user_df.drop_duplicates(subset='USER_ID', keep='last')
                user_id_list = user_df['USER_ID'].to_list()
                sql_search = "DELETE FROM {0}_all_user_data WHERE USER_ID IN %s".format(groupName)
                cursor.execute(sql_search, (user_id_list,))  # pymysql expands the list
                data_to_insert = [row for row in user_df.itertuples(index=False)]
                cursor.executemany(sql, data_to_insert)
                conn.commit()
        except pymysql.MySQLError as e:
            conn.rollback()
            my_logger.error("数据库错误，事务已回滚: %s", e)
        except Exception as e:
            conn.rollback()
            my_logger.error("发生未知错误，事务已回滚: %s", e)

        if result['data']['finalRes']:
            break
        _save_progress(json_file, data)
    my_logger.info("all_user_data更新完毕")
    _save_progress(json_file, data)


def _sync_loan_history(base_url, headers, groupName, conn, data, json_file):
    # Page through fetch_loan_history and append each borrowed itemId to the
    # borrower's user_hist string ("id1|id2|...") in <group>_all_user_data.
    error_count = 0
    max_errors = 8
    while True:
        result = None
        try:
            result = fetch_loan_history(base_url, headers, data['loan_last_id'])
            if not result['success']:
                my_logger.warning("fetch_loan_history 请求成功，但返回的结果标志为 False: %s", result)
        except Exception as e:
            my_logger.error(f"Error occurred while fetching load history: {e}")

        # Bounded retry on failure (original spun forever / crashed here).
        if result is None or not result['success']:
            error_count += 1
            if error_count >= max_errors:
                my_logger.error("已尝试多次网络连接，均失败，故终止程序！！！")
                break
            continue
        error_count = 0

        if result['data']['size'] == 0:
            my_logger.warning("结果数据中值size为0")
            break
        data['loan_last_id'] = result['data']['dataList'][-1]['loanHistId']
        try:
            conn.begin()
            with conn.cursor() as cursor:
                result_df = pd.json_normalize(result['data']['dataList'])
                # Keep only loan records of type '0' — presumably ordinary
                # borrow events; TODO confirm against the API docs.
                result_df = result_df[result_df['loanType'] == '0'][['loanHistId', 'itemId', 'userId', 'createDate', 'loanType']]
                sql_select = f"SELECT USER_ID, user_hist FROM {groupName}_all_user_data WHERE USER_ID IN %s"
                user_ids = list(result_df['userId'].unique())
                if user_ids:
                    cursor.execute(sql_select, (user_ids,))
                    # Map USER_ID -> current user_hist for every known user.
                    user_hist_map = {user_id: user_hist for user_id, user_hist in cursor.fetchall()}
                    for _, row in result_df.iterrows():
                        if row['userId'] in user_hist_map:
                            user_hist_map[row['userId']] += str(row['itemId']) + '|'
                    # Batch-update the extended histories.
                    updates = [(user_hist, user_id) for user_id, user_hist in user_hist_map.items()]
                    sql_update = f"UPDATE {groupName}_all_user_data SET user_hist = %s WHERE USER_ID = %s"
                    cursor.executemany(sql_update, updates)
                    conn.commit()
                else:
                    my_logger.warning("未找到用户ID，跳过数据库操作！！！")
        except pymysql.MySQLError as e:
            conn.rollback()
            my_logger.error(f"An error occurred during database operation: {e}")
        except Exception as e:
            conn.rollback()
            my_logger.error(f"An unexpected error occurred: {e}")

        if result['data']['finalRes']:
            break
        _save_progress(json_file, data)
    my_logger.info("根据借阅历史更新用户的行为序列完毕")
    _save_progress(json_file, data)


def update_local_database(base_url, headers, groupName, conn, process_tag_path, json_file='./configs/test.json'):
    """Incrementally mirror remote library data into the local MySQL tables.

    Pages through metadata, book attributes, items, users and loan history
    (resuming from the cursors stored in ``json_file``), writes every page
    into the ``<groupName>_*`` tables, and finally closes ``conn``.

    Args:
        base_url: API root consumed by the fetch_* helpers.
        headers: signed request headers for the API.
        groupName: library group code; prefixes every target table name.
        conn: open pymysql connection; closed before returning.
        process_tag_path: path to the tag-definition CSV consumed by Tag.
        json_file: JSON progress/config file, rewritten after every page.

    Raises:
        Config-loading and table-creation errors propagate; per-page fetch
        and database errors are logged and retried/skipped instead.
    """
    # TODO: consider pulling the data with multiple threads.
    data = _load_config(json_file)

    try:
        create_sql_database(conn, groupName)
    except pymysql.MySQLError as e:
        my_logger.error("创建数据库表时发生数据库错误: %s", e)
        raise
    except Exception as e:
        my_logger.error("创建数据库表时发生未知错误: %s", e)
        raise

    _sync_metadata(base_url, headers, groupName, conn, data, json_file)
    _sync_book_attr(base_url, headers, groupName, conn, data, json_file)
    # One Tag instance is shared by every item page.
    tagClass = Tag(process_tag_path)
    _sync_items(base_url, headers, groupName, conn, data, json_file, tagClass)
    _sync_users(base_url, headers, groupName, conn, data, json_file)
    # TODO: user_hist could be extended with CONCAT(user_hist, %s) directly in
    # SQL instead of round-tripping every history string through memory.
    _sync_loan_history(base_url, headers, groupName, conn, data, json_file)
    conn.close()

if __name__ == "__main__":

    # CSV with the detailed tag definitions used to label items later on.
    process_tag_path = "/opt/wyh/LSP_book_rec/Label_books_with_categories/processed_data/SourceUniversity/process_tag.csv"
    # NOTE: group 'NNU' may only be pulled after 10 pm.

    # Process each library group in turn.
    for groupName in ['NJULIB']:
        json_file = f'../configs/{groupName}.json'

        # Attempt the update twice: the first failure is typically an expired
        # signature, so regenerate headers via initial_load and retry once.
        # Unlike the previous version, a failure inside the retry (including
        # initial_load itself) is caught here instead of killing the loop.
        for attempt in range(2):
            try:
                base_url, headers, conn = initial_load(json_file)
                update_local_database(base_url, headers, groupName, conn, process_tag_path, json_file)
                break
            except Exception as e:
                if attempt == 0:
                    my_logger.error(f"Error occurred for {groupName}: {e}")
                    my_logger.info("重新生成headers")
                else:
                    my_logger.error(f"Retry failed for {groupName}: {e}")
