import io
import os
from urllib.parse import quote_plus

import cx_Oracle
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.types import VARCHAR, Integer, Date, TIMESTAMP, FLOAT

from utils import check_data
from utils_func.env import ROOT_PATH
from utils_func.get_logger import get_logger

# Module-level logger; writes to <ROOT_PATH>/log/monitor.log.
logger = get_logger("monitor", ROOT_PATH.joinpath('log', 'monitor.log'))
# Build the SQLAlchemy connection URL for the Oracle database.
# NOTE: this returns a URL string, not an engine object.
def get_connection():
    """Build the SQLAlchemy connection URL for the Oracle database.

    Credentials default to the historical hard-coded values but can be
    overridden through environment variables (``ORACLE_USER``,
    ``ORACLE_PASSWORD``, ``ORACLE_HOST``, ``ORACLE_PORT``, ``ORACLE_DB``)
    so secrets need not live in source control.

    Returns:
        str: an ``oracle+cx_oracle://user:password@host:port/database`` URL.
    """
    # SECURITY: the hard-coded values below are kept only as fallback
    # defaults for backward compatibility; prefer the ORACLE_* env vars.
    user = os.environ.get('ORACLE_USER', 'sjz_dm')
    password = os.environ.get('ORACLE_PASSWORD', 'sjz_dm2023')
    host = os.environ.get('ORACLE_HOST', '211.144.132.110')
    port = int(os.environ.get('ORACLE_PORT', '7000'))
    database = os.environ.get('ORACLE_DB', 'dataming')
    # quote_plus guards against special characters in the password
    # breaking the URL (a no-op for the current default password).
    return f"oracle+cx_oracle://{user}:{quote_plus(password)}@{host}:{port}/{database}"

def insert_dataframe_in_chunks(df, chunk_size, engine, table_name, dtypes):
    """Append ``df`` to Oracle table ``table_name`` in chunks.

    Args:
        df: the DataFrame to persist.
        chunk_size: maximum number of rows per ``to_sql`` call.
        engine: a SQLAlchemy engine/connectable.
        table_name: destination table name.
        dtypes: mapping of column name -> SQLAlchemy type for ``to_sql``.
    """
    # Guard: an empty frame previously shrank chunk_size to 0, making
    # range(0, 0, 0) raise ValueError ("range() arg 3 must not be zero").
    if df.empty:
        logger.info(f"No rows to insert into {table_name}")
        return
    chunk_size = min(chunk_size, len(df))
    # Convert the known date columns ONCE on a copy, instead of per chunk
    # on an .iloc slice (which triggered SettingWithCopyWarning and
    # repeated the conversion for every chunk).
    date_cols = [c for c in ('日期', '保障开始时间', '保障结束时间', '维修申请时间')
                 if c in df.columns]
    if date_cols:
        df = df.copy()
        for col in date_cols:
            df[col] = pd.to_datetime(df[col])
    # Insert data in chunks; always append (table is created on first append).
    for start in range(0, len(df), chunk_size):
        end = min(start + chunk_size, len(df))
        df.iloc[start:end].to_sql(table_name, con=engine, index=False,
                                  if_exists='append', chunksize=5000,
                                  dtype=dtypes)
        logger.info(f"Inserted rows {start} to {end}")

def savetosql(df, table_name):
    """Persist ``df`` into Oracle table ``table_name`` with explicit column types.

    Only the columns actually present in ``df`` are given an explicit
    SQLAlchemy type; the rest fall back to pandas' inference.
    """
    # Full type map for every column this pipeline may produce.
    dtypes = {
        '项目': VARCHAR(10),  # Adjust the length as needed
        '保司': VARCHAR(10),   # Adjust the length as needed
        '批次月份': VARCHAR(20),
        '统计品类': VARCHAR(10),
        '产品简称': VARCHAR(20),
        '保障开始时间': Date,
        '保障结束时间': Date,
        '匹配用': VARCHAR(40),
        '产品层级2': VARCHAR(10),
        '日期': Date,
        '维修申请时间': Date,
        '保费总和': FLOAT,
        '净费': FLOAT,
        '分摊': FLOAT
    }
    # Keep only the types for columns present in this frame.
    dtype = {col: dtypes[col] for col in df.columns if col in dtypes}

    engine = create_engine(get_connection())
    # DataFrame.info() prints to stdout and returns None, so the original
    # logger.info(df.info()) logged the string "None"; capture the report
    # into a buffer and log the actual text instead.
    buf = io.StringIO()
    df.info(buf=buf)
    logger.info(buf.getvalue())
    chunk_size = 500000
    insert_dataframe_in_chunks(df, chunk_size, engine, table_name, dtype)

if __name__ == '__main__':
    # Build the path with pathlib instead of a hard-coded Windows
    # backslash string (the old f'{ROOT_PATH}\\data\\...' form was
    # non-portable and depended on '\d' not being an escape sequence).
    # GBK encoding: the source CSV comes from a Chinese Windows export.
    csv_path = ROOT_PATH / 'data' / '2_理赔原始数据_苏宁香港人保.csv'
    lp_raw = pd.read_csv(csv_path, encoding='GBK')
    # Normalize the repair-request timestamp to a plain YYYY-MM-DD string.
    lp_raw['维修申请时间'] = pd.to_datetime(lp_raw['维修申请时间']).dt.strftime('%Y-%m-%d')

    # 数据检查 (data validation): split into clean and abnormal rows.
    lp, lp_abnormal = check_data(lp_raw)  # TODO: persist lp_abnormal so abnormal values can be revised

    savetosql(lp, 'LP_SNHK_RB')