import json
import hashlib
import time
import requests
import pymysql
import os
import pandas as pd
import time
import datetime
import sys
sys.path.append("../")
from Dataset.fetch_utils import fetch_loan_history,fetch_user,fetch_item,fetch_book_attr,fetch_metadata,create_sql_database,initial_load
from Dataset.TagClass import Tag
# from fetch_utils import fetch_loan_history,fetch_user,fetch_item,fetch_book_attr,fetch_metadata,create_sql_database,initial_load
# from TagClass import Tag

from logs.log import setup_custom_logger

# TODO: resolve the GPU resource-allocation problem properly
os.environ['CUDA_VISIBLE_DEVICES'] = '2,3,7' 

# Logging initialisation: one log file per calendar day under ../Logfiles
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
log_dir = os.path.join(os.path.dirname(__file__), '..', 'Logfiles')
file_name = str(datetime.datetime.now().strftime('%Y-%m-%d'))
my_logger =  setup_custom_logger(os.path.join(log_dir, f'{file_name}.log'), log_level="INFO")

# exit()

class pullDataFromSql(object):

    """
    Pull data from the remote Aliyun / Chaoxing cloud / Chaoxing
    public-library cloud APIs and mirror it into a local MySQL database.

    One instance serves a single institution (``group_name``); every table
    it writes to is prefixed with that group name. Pull progress (last seen
    ids) is persisted back into the per-institution JSON config file so an
    interrupted run can resume.
    """

    def __init__(self,
                 config_path:str,
                 group_name:str,
                 db_config_path: str
                 ):
        
        self.config_path = config_path  # path of the per-institution config/progress file
        self.group_name = group_name  # institution code, used as table-name prefix
        # CSV with detailed tag explanations, used later to label items
        self.process_tag_path = "/opt/wyh/LSP_book_rec/Label_books_with_categories/processed_data/SourceUniversity/process_tag.csv"
        self.tagClass = Tag(self.process_tag_path)
        # Max consecutive network failures tolerated before giving up
        self.max_errors = 8
        # Initialise database connection and configuration
        self.db_config_path = db_config_path
        print("self.db_config_path", self.db_config_path)
        print("db_config_path", self.db_config_path)
        # SQL statement templates and DB settings live in this config file
        self.mysql_db_config = self.load_config_file(self.db_config_path)
        self.base_url, self.headers, self.conn = self.init_load_database(config_path=self.config_path)
        print("init pull data success!")



    def init_load_database(self, config_path="../configs/test.json"):
        """
        Initialise the remote-API session and the local DB connection.

        Returns the ``(base_url, headers, conn)`` triple produced by
        ``initial_load``.
        """
        return initial_load(config_path=config_path)
    
    def save_config_file(self, data):
        """
        Persist the progress/config dict back to ``self.config_path``
        (pretty-printed JSON, non-ASCII preserved).
        """
        with open(self.config_path, 'w', encoding='utf-8') as file:
            json.dump(data, file, ensure_ascii=False, indent=4)

    def load_config_file(self, json_file='../configs/test.json'):
        """
        Load and return a JSON config file; log and re-raise on failure.
        """
        print("json_file", json_file)
        try:
            with open(json_file, 'r', encoding='utf-8') as file:

                data = json.load(file)
        except (FileNotFoundError, json.JSONDecodeError) as e:
            my_logger.error(f"加载配置文件 {json_file} 时发生错误: {e}")
            raise
        return data

    def create_database(self):
        """
        Create the per-institution database tables; log and re-raise on
        ``pymysql.MySQLError``.
        """
        try:
            create_sql_database(self.conn, self.group_name)
        except pymysql.MySQLError as e:
            my_logger.error(f"创建数据库表时发生数据库错误: {e}")
            raise

    def fetch_meta_data_proc(self, data):
        """
        Pull metadata pages from the remote API and batch-insert them into
        ``{group_name}_metadata``, saving progress after every page.

        Args:
            data: mutable progress/config dict; reads and updates
                ``metadata_last_id`` and ``metadata_record_last_id``.
        """
        error_count = 0
        while(True):

            try:
                result = fetch_metadata(self.base_url, self.headers, data['metadata_last_id'], data['metadata_record_last_id'])
                if result['success']:

                    error_count = 0 # reset the consecutive-error counter

                else:
                    my_logger.warning("fetch_metadata 请求成功，但返回的结果标志为 False: %s", result)
            except requests.exceptions.RequestException as e:
                my_logger.error("请求 fetch_metadata 时发生网络错误: %s", e)
                error_count += 1
                if error_count >= self.max_errors:
                    my_logger.error("已尝试多次网络连接，均失败，故终止程序！！！")
                    break
                continue
            except ValueError as e:
                my_logger.error("解析 fetch_metadata 返回的数据时发生错误: %s", e)
                raise e
            except KeyError as e:
                my_logger.error("fetch_metadata 返回的数据缺少预期的字段: %s", e)
            except Exception as e:
                my_logger.error("调用 fetch_metadata 时发生未知错误: %s", e)

            # NOTE(review): if the very first iteration takes the KeyError or
            # generic-Exception branch above, `result` is still unbound here
            # and the line below raises NameError — confirm and guard.
            if result['success']:
                if result['data']['size'] == 0:
                    my_logger.warning("拉取结果时数据size为0")
                    break
                # Advance the resume cursors to the last record of this page.
                data['metadata_last_id'],data['metadata_record_last_id'] = result['data']['dataList'][-1]['mdId'],result['data']['dataList'][-1]['recordId']
                try:
                    self.conn.begin() # start an explicit transaction
                    with self.conn.cursor() as cursor:
                        # Insert template (columns, for reference):
                        #     INSERT INTO metadata (MD_ID, RECORD_ID, FIELD, FIELD_DATA, LIB_CODE, GROUP_CODE, CREATE_BY, CREATE_DATE, UPDATE_BY, UPDATE_DATE)
                        #     VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
                        sql = self.mysql_db_config['sql_statements']['meta_data']['insert_meta_data']
                        # Point the template at this institution's table.
                        sql = sql.replace('metadata', f'{self.group_name}_metadata')
                        # Batch-insert the whole page.
                        cursor.executemany(sql, [(item['mdId'], item['recordId'], item['field'], item['fieldData'], item['libCode'], item['groupCode'], item['createBy'], item['createDate'], item['updateBy'], item['updateDate']) for item in result['data']['dataList']])
                        self.conn.commit()  # commit the transaction
                except pymysql.MySQLError as e:
                    # Roll back on database errors.
                    self.conn.rollback()
                    my_logger.error("数据库错误，事务已回滚: %s", e)
                except Exception as e:
                    # Roll back on any other error.
                    self.conn.rollback()
                    my_logger.error("发生未知错误，事务已回滚: %s", e)
            # NOTE(review): this is evaluated even when result['success'] is
            # False, in which case result['data']['finalRes'] may be missing
            # (KeyError) — verify against the API's error payload shape.
            if result['data']['finalRes']:
                my_logger.info("已经拉取了目前存在的所有metadata数据")
                break
            self.save_config_file(data)
        my_logger.info("metadata更新完毕")
        try:
            # Persist final progress.
            self.save_config_file(data)
        except Exception as e:
            my_logger.error("记录进度时发生错误: %s", e)


    def _insert_book_attr_to_db(self, result: dict):
        """
        Batch-insert one page of book-attribute rows into
        ``{group_name}_book_attr``; rolls back and logs on any failure.
        """
        try:
            with self.conn.cursor() as cursor:
                sql = self.mysql_db_config['sql_statements']['book_attr']['insert_book_attr']
                # Point the template at this institution's table.
                sql = sql.replace("book_attr", f"{self.group_name}_book_attr")

                cursor.executemany(sql, [
                    (
                        item["recordId"], item["callNo"], item["title"],
                        item["titleS"], item["isbn13"], item["isbn10"],
                        item["isbn"], item["author"], item["publisher"],
                        item["pubDate"], item["pubYear"], item["serirlFlag"],
                        item["partitioner"], item["version"], item["series"],
                        item["postIssueNo"], item["cnNo"], item["versionExp"],
                        item["eisbn"]
                    )
                    for item in result['data']['dataList']
                ])
            self.conn.commit()
        except pymysql.MySQLError as e:
            self.conn.rollback()
            my_logger.error(f"数据库错误，事务已回滚: {e}")
        except Exception as e:
            self.conn.rollback()
            my_logger.error(f"未知错误，事务已回滚: {e}")

    def fetch_book_attr_proc(self, data):
        """
        Pull book-attribute pages and insert them via
        ``_insert_book_attr_to_db``, saving progress after every page.

        Args:
            data: mutable progress/config dict; reads and updates
                ``record_last_id``.
        """
        while(True):
            try:
                # Fetch the next page of book attributes.
                result = fetch_book_attr(self.base_url, self.headers, data['record_last_id'])
                
                # The call can succeed at HTTP level but still signal failure.
                if not result['success']:
                    my_logger.warning("fetch_book_attr 请求成功，但返回的结果标志为 False: %s", result)

            except requests.exceptions.RequestException as e:
                # Network-level failure.
                my_logger.error("请求 fetch_book_attr 时发生网络错误: %s", e)

            except ValueError as e:
                # Response-parsing failure.
                my_logger.error("解析 fetch_book_attr 返回的数据时发生错误: %s", e)

            except KeyError as e:
                # Expected field missing from the response.
                my_logger.error("fetch_book_attr 返回的数据缺少预期的字段: %s", e)

            except Exception as e:
                # Any other unexpected error stops the loop.
                my_logger.error("调用 fetch_book_attr 时发生未知错误: %s", e)
                break

            # NOTE(review): the RequestException / ValueError / KeyError
            # branches above neither break nor continue, so on a first-try
            # failure `result` is unbound here (NameError), and on later
            # failures the previous page is re-processed — confirm and fix.
            if result['success']:
                if result['data']['size'] == 0:
                    break
                # Advance the resume cursor to the last record of this page.
                data['record_last_id'] = result['data']['dataList'][-1]['recordId']
                self._insert_book_attr_to_db(result=result)

                if result['data']['finalRes']:
                    break
                # Persist progress after every page.
                self.save_config_file(data)
            else:
                my_logger.warning(f"fetch_book_attr 返回 success=False: {result}")
                break
        my_logger.info("book_attr更新完毕")
        self.save_config_file(data)



    def fetch_item_proc(self, data):
        """
        Pull item pages, tag them, and upsert them into
        ``{group_name}_all_item_data`` (delete existing ITEM_IDs, then batch
        insert), saving progress after every page.

        Args:
            data: mutable progress/config dict; reads and updates
                ``item_last_id``; ``data['data_config']['item_data']``
                declares each feature's name and type
                (IdFeature / DenseFeature).
        """
        while(True):
            try:
                # Fetch the next page of item records.
                result = fetch_item(self.base_url, self.headers, data['item_last_id'])
                
                # The call can succeed at HTTP level but still signal failure.
                if not result['success']:
                    my_logger.warning("fetch_item 请求成功，但返回的结果标志为 False: %s", result)
                
            except requests.exceptions.RequestException as e:
                # Network-level failure.
                my_logger.error("请求 fetch_item 时发生网络错误: %s", e)

            except ValueError as e:
                # Response-parsing failure (e.g. malformed JSON).
                my_logger.error("解析 fetch_item 返回的数据时发生错误: %s", e)

            except KeyError as e:
                # Expected field missing, e.g. result['data']['dataList'].
                my_logger.error("fetch_item 返回的数据缺少预期的字段: %s", e)

            except Exception as e:
                # Any other unexpected error.
                my_logger.error("调用 fetch_item 时发生未知错误: %s", e)

            # NOTE(review): none of the except branches above break or
            # continue, so `result` may be unbound (first iteration) or stale
            # (later iterations) at this point — confirm and guard.
            if result['success']:
                if result['data']['size'] == 0:
                    break
                # Advance the resume cursor to the last item of this page.
                data['item_last_id'] = result['data']['dataList'][-1]['itemId']
                try:
                    with self.conn.cursor() as cursor:
                        # Batch-insert template (columns, for reference):
                        #     INSERT INTO all_item_data (ITEM_ID,CUR_LOCATION_ID,TOTAL_CIRC_COUNT,GET_MODE_CODE,CIRC_NUMBER,RECORD_ID,UPDATE_DATE,tag) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
                        sql = self.mysql_db_config['sql_statements']['item_data']['insert_item']
                        # Point the template at this institution's table.
                        sql = sql.replace('all_item_data', f'{self.group_name}_all_item_data')
                        # Flatten the JSON page into a DataFrame.
                        result_df = pd.json_normalize(result['data']['dataList'])
                        # Record ids of this page, used for tagging below.
                        record_id_list = result_df["recordId"].to_list()
                        # Attach a tag to every recordId via the Tag helper.
                        tag_df = self.tagClass.add_tag(record_id_list, self.group_name, cursor)
                        # Feature selection: split declared item features into
                        # sparse (IdFeature) and dense (DenseFeature) sets.
                        item_sparse_features = []  
                        item_dense_features = []
                        for item_fea in data['data_config']['item_data']:
                            if item_fea["feature_type"] == "IdFeature":
                                item_sparse_features.append(item_fea["input_name"])
                            elif item_fea["feature_type"] == "DenseFeature":
                                item_dense_features.append(item_fea["input_name"])
                        # Merge tags in, project/rename to DB column names,
                        # and fill missing values per feature type.
                        item_df = pd.merge(result_df,tag_df, on='recordId')
                        item_df = item_df[['itemId','curLocationId','totalCircCount','getModeCode','circNumber','recordId','updateDate','tag']]
                        item_df.rename(columns={'itemId': 'ITEM_ID','curLocationId':'CUR_LOCATION_ID','totalCircCount':'TOTAL_CIRC_COUNT','getModeCode':'GET_MODE_CODE','circNumber':'CIRC_NUMBER','recordId':'RECORD_ID','updateDate':'UPDATE_DATE'}, inplace=True)
                        item_df[item_sparse_features] = item_df[item_sparse_features].fillna(-2)
                        item_df[item_dense_features] = item_df[item_dense_features].fillna(0)
                        item_df[item_dense_features] = item_df[item_dense_features].astype(float)
                        item_df['tag'] = item_df['tag'].astype(str) 
                        # Drop duplicate items within the page (keep the newest).
                        item_df = item_df.drop_duplicates(subset='ITEM_ID', keep='last')
                        # Delete rows whose ITEM_ID already exists locally
                        # (the incoming row wins), then insert the page.
                        
                        item_id_list = item_df['ITEM_ID'].to_list()
                        # Delete template, e.g.:
                        #     DELETE FROM {0}_all_item_data WHERE ITEM_ID IN %s
                        sql_search = str(self.mysql_db_config['sql_statements']['item_data']['delete_item']).format(self.group_name)
                        cursor.execute(sql_search, (item_id_list,))  # note: the list is passed directly as one parameter
                        data_to_insert = [row for row in item_df.itertuples(index=False)]
                        cursor.executemany(sql,data_to_insert)                
                        self.conn.commit()
                except pymysql.MySQLError as e:
                    # Roll back on database errors.
                    self.conn.rollback()
                    my_logger.error("数据库错误，事务已回滚: %s", e)
                except Exception as e:
                    # Roll back on any other error.
                    self.conn.rollback()
                    my_logger.error("发生未知错误，事务已回滚: %s", e)


                if result['data']['finalRes']:
                    break

                # Persist progress after every page.
                self.save_config_file(data)
            else:
                my_logger.warning(f"fetch_item 返回 success=False: {result}")
                break

        my_logger.info("all_item_data更新完毕")
        self.save_config_file(data)

    def _insert_item_to_db(self, result):
        """
        Batch-insert one page of raw item rows into
        ``{group_name}_all_item_data``; rolls back and logs on failure.

        NOTE(review): unlike ``fetch_item_proc`` this inserts the normalized
        frame as-is (no tagging/renaming); it appears unused in this file —
        confirm before relying on it.
        """
        try:
            with self.conn.cursor() as cursor:
                sql = self.mysql_db_config['sql_statements']['item_data']['insert_item']
                sql = sql.replace('all_item_data', f'{self.group_name}_all_item_data')
                result_df = pd.json_normalize(result['data']['dataList'])
                # Insert the flattened rows directly.
                cursor.executemany(sql, result_df.to_records(index=False))
                self.conn.commit()
        except pymysql.MySQLError as e:
            self.conn.rollback()
            my_logger.error(f"数据库错误，事务已回滚: {e}")
        except Exception as e:
            self.conn.rollback()
            my_logger.error(f"发生未知错误，事务已回滚: {e}")

    def fetch_user_proc(self, data):
        """
        Pull user pages and upsert them into ``{group_name}_all_user_data``
        (delete existing USER_IDs, then batch insert), saving progress after
        every page.

        Args:
            data: mutable progress/config dict; reads and updates
                ``user_last_id``; ``data['data_config']['user_data']``
                declares each feature's name and type.
        """
        while(True):
            try:
                # Fetch the next page of user records.
                result = fetch_user(self.base_url, self.headers, data['user_last_id'])

            except Exception as e:
                # Any failure stops the user pull.
                my_logger.error(f"Error occurred while fetching user data: {e}")
                break
            if result['success']:
                if result['data']['size'] == 0:
                    break
                # Advance the resume cursor to the last user of this page.
                data['user_last_id'] = result['data']['dataList'][-1]['userId']
                try:
                    with self.conn.cursor() as cursor:
                        # Batch-insert template (columns, for reference):
                        #     INSERT INTO all_user_data (USER_ID,GENDER,COLLEGE_YEAR_ID,COLLEGE_DEPT_ID,MAJOR_ID,TOTAL_LEND,RESEARCHER_FLAG,EDU_ID,COLLEGE_CLASS_ID,UPDATE_DATE,user_hist) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
                        sql = self.mysql_db_config['sql_statements']['user_data']['insert_user_data']
                        # Point the template at this institution's table.
                        sql = sql.replace('all_user_data', f'{self.group_name}_all_user_data')

                        result_df = pd.json_normalize(result['data']['dataList'])
                        # Project to the needed columns and initialise the
                        # user's behaviour history ('user_hist') to ''.
                        user_df = result_df[['userId','gender','collegeYearId','collegeDeptId','majorId','totalLend','researcherFlag','eduId','collegeClassId','updateDate']].copy()
                        user_df = user_df.assign(user_hist='')
                        # Rename to DB column names (positional).
                        user_df.columns = ['USER_ID','GENDER','COLLEGE_YEAR_ID','COLLEGE_DEPT_ID','MAJOR_ID','TOTAL_LEND','RESEARCHER_FLAG','EDU_ID','COLLEGE_CLASS_ID','UPDATE_DATE','user_hist']
                        # Feature selection: sparse vs dense, per config.
                        user_sparse_features = [] 
                        user_dense_features = []
                        for user_fea in data['data_config']['user_data']:
                            if user_fea["feature_type"] == "IdFeature":
                                user_sparse_features.append(user_fea["input_name"])
                            elif user_fea["feature_type"] == "DenseFeature":
                                user_dense_features.append(user_fea["input_name"])
                        # Fill missing values per feature type.
                        user_df[user_sparse_features] = user_df[user_sparse_features].fillna(-2)
                        user_df[user_dense_features] = user_df[user_dense_features].fillna(0)
                        user_df[user_dense_features] = user_df[user_dense_features].astype(float)

                        
                        # Drop duplicate users within the page (keep the newest).
                        user_df = user_df.drop_duplicates(subset='USER_ID', keep='last')
                        # Delete rows whose USER_ID already exists locally
                        # (the incoming row wins), then insert the page.
                        
                        user_id_list = user_df['USER_ID'].to_list()
                        # Delete template, e.g.:
                        #     DELETE FROM {0}_all_user_data WHERE USER_ID IN %s
                        sql_search = str(self.mysql_db_config['sql_statements']['user_data']['delete_user_data']).format(self.group_name)
                        cursor.execute(sql_search, (user_id_list,))  # note: the list is passed directly as one parameter
                        data_to_insert = [row for row in user_df.itertuples(index=False)]
                        cursor.executemany(sql,data_to_insert)                
                        self.conn.commit()
                except pymysql.MySQLError as e:
                    # Roll back on database errors.
                    self.conn.rollback()
                    my_logger.error("数据库错误，事务已回滚: %s", e)
                except Exception as e:
                    # Roll back on any other error.
                    self.conn.rollback()
                    my_logger.error("发生未知错误，事务已回滚: %s", e)
                if result['data']['finalRes']:
                    break
                # Persist progress after every page.
                self.save_config_file(data)
            else:
                my_logger.warning(f"fetch_user 返回 success=False: {result}")
                break
            

        my_logger.info("all_user_data更新完毕")
        self.save_config_file(data)
    
    def fetch_loan_history_proc(self, data):
        """
        Pull loan-history pages and append each borrowed itemId (pipe-
        separated) onto the borrower's ``user_hist`` column in
        ``{group_name}_all_user_data``.

        Args:
            data: mutable progress/config dict; reads and updates
                ``loan_last_id``.
        """
        # TODO: could be optimised — instead of reading user_hist from the DB,
        # append via SQL (CONCAT(user_hist, %s)) to cut memory usage.
        while(True):
            try:
                
                result = fetch_loan_history(self.base_url, self.headers,data['loan_last_id'])

            except Exception as e:
                my_logger.error(f"Error occurred while fetching load history: {e}")
                break

            if result['success']:

                if result['data']['size'] == 0:
                    my_logger.warning("结果数据中值size为0")
                    break

                # Advance the resume cursor to the last loan of this page.
                data['loan_last_id'] = result['data']['dataList'][-1]['loanHistId']
                try:
                    with self.conn.cursor() as cursor:
                        result_df = pd.json_normalize(result['data']['dataList'])
                        # Keep only normal loans (loanType == '0').
                        result_df = result_df[result_df['loanType'] == '0'][['loanHistId','itemId','userId','createDate','loanType']]
                        # Select template, e.g.:
                        #     SELECT USER_ID, user_hist FROM {0}_all_user_data WHERE USER_ID IN %s
                        sql_select = str(self.mysql_db_config['sql_statements']['loan_history_data']['search_loan_history']).format(self.group_name)
                        user_ids = list(result_df['userId'].unique())
                        if user_ids:
                            cursor.execute(sql_select,(user_ids,))
                            user_hist_data = cursor.fetchall()
                            # Map USER_ID -> current user_hist string.
                            user_hist_map = {user_id: user_hist for user_id, user_hist in user_hist_data}
                        
                            # Accumulate this page's loans onto each history.
                            updates = []
                        
                            for _, row in result_df.iterrows():

                                # NOTE(review): raises KeyError if a userId in
                                # this page is absent from the local user table
                                # (swallowed by the generic except + rollback
                                # below, skipping the whole page) — confirm.
                                user_hist_map[row['userId']] +=  str(row['itemId']) + '|'

                            # Prepare (user_hist, user_id) pairs for the update.
                            updates = [(user_hist, user_id) for user_id, user_hist in user_hist_map.items()]
                            # Update template, e.g.:
                            #     UPDATE all_user_data SET user_hist = %s WHERE USER_ID = %s
                            sql_update = self.mysql_db_config['sql_statements']['loan_history_data']['update_loan_history']
                            sql_update = sql_update.replace('all_user_data', f'{self.group_name}_all_user_data')
                            cursor.executemany(sql_update, updates)
                            self.conn.commit() 
                        else:
                            my_logger.warning("未找到用户ID，跳过数据库操作！！！")
                except pymysql.MySQLError as e:
                    # Roll back on database errors.
                    self.conn.rollback()
                    my_logger.error(f"An error occurred during database operation: {e}")

                except Exception as e:
                    # Roll back on any other error.
                    self.conn.rollback()
                    my_logger.error(f"An unexpected error occurred: {e}")
                if result['data']['finalRes']:
                    break
                # Persist progress after every page.
                self.save_config_file(data)
            else:
                my_logger.warning(f"fetch_load_history 返回success=False：{result}")
                break

        my_logger.info("根据借阅历史更新用户的行为序列完毕")
        # Persist final progress.
        self.save_config_file(data)


    def update_local_database(self):
        """
        Main flow: pull every data category in order and update the local
        database. (Could be parallelised, but beware conflicting DB writes.)
        """
        data = self.load_config_file(self.config_path)

        # Create the per-institution tables.
        # NOTE(review): begin() opens a transaction but DDL in MySQL
        # auto-commits; the begin() here may be redundant — confirm.
        self.conn.begin()
        self.create_database()

        # Pull metadata
        self.fetch_meta_data_proc(data=data)
        # Pull book attributes
        self.fetch_book_attr_proc(data=data)
        # Pull item records
        self.fetch_item_proc(data=data)
        # Pull user records
        self.fetch_user_proc(data=data)
        # Pull loan history
        self.fetch_loan_history_proc(data=data)

        self.conn.close()

import concurrent.futures

def process_group(groupName):
    """
    Run the full pull pipeline for a single institution, with retries.

    Re-creates the ``pullDataFromSql`` instance on every attempt so a fresh
    request signature is generated, and retries the whole pipeline up to
    ``max_retries`` additional times when any exception escapes.

    Args:
        groupName: institution code (e.g. ``"WDU"``); selects both the
            config file ``configs/{groupName}.json`` and the table prefix.
    """
    max_retries = 2
    json_file = f'/opt/wyh/LSP_book_rec/configs/{groupName}.json'
    db_dir = "/opt/wyh/LSP_book_rec/global_configs"
    db_config_path = os.path.join(db_dir, "mysql_db_config.json")
    for attempt in range(max_retries + 1):
        try:
            # Re-initialise on every attempt to generate a new signature.
            pullProc = pullDataFromSql(group_name=groupName, config_path=json_file, db_config_path=db_config_path)
            pullProc.update_local_database()
        except Exception as e:
            my_logger.error(f"Error processing {groupName}: {e}")
            if attempt < max_retries:
                my_logger.error(f"Retrying {groupName} after 1 second...")
                time.sleep(1)  # wait 1s before retrying
            else:
                my_logger.error(f"Max retries ({max_retries}) exceeded for {groupName}")
        else:
            # Bug fix: without this return the loop kept iterating after a
            # successful run, re-pulling the same group max_retries more times.
            return

if __name__ == "__main__":
    # Institutions to process; other known codes:
    # 'NJULIB', 'SUDA', 'NJUPT', 'HENU', 'NNU', 'ECNU'
    group_names = ["WDU"]
    if len(group_names) == 1:
        # A single institution runs synchronously in this process.
        process_group(group_names[0])
    else:
        # Multiple institutions fan out across a process pool,
        # one worker per institution.
        with concurrent.futures.ProcessPoolExecutor(max_workers=len(group_names)) as pool:
            pool.map(process_group, group_names)



