#库导入
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from keras.models import Sequential
from keras.layers import GRU, Dense
from sklearn.model_selection import train_test_split, KFold,GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score,r2_score,roc_curve
from keras.callbacks import EarlyStopping
from sklearn.metrics import mean_squared_error
from keras.models import load_model
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
import pickle
import seaborn as sns
import joblib

# Configure matplotlib for Chinese text and pandas for 2-decimal float display
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so CJK labels render instead of boxes
plt.rcParams['axes.unicode_minus'] = False  # draw the minus sign correctly under a CJK font
pd.options.display.float_format = '{:.2f}'.format  # print floats with two decimal places

# Map a calendar month to its season label
def get_season(month):
    """Return the season name for *month* (1-12).

    Months 3-5 map to '春季' (spring), 6-8 to '夏季' (summer),
    9-11 to '秋季' (autumn); everything else (12, 1, 2) falls
    through to '冬季' (winter).
    """
    season_table = (
        (range(3, 6), '春季'),
        (range(6, 9), '夏季'),
        (range(9, 12), '秋季'),
    )
    for months, label in season_table:
        if month in months:
            return label
    return '冬季'

# Load the raw data and visualize ward income share by department
def load_data():
    """Read B.csv, normalize column dtypes, and plot a pie chart of each
    department's share of daily ward income.

    Returns:
        The full DataFrame (no department filtering happens here — that is
        done in preprocess_data).

    Side effects: shows a matplotlib pie chart.
    """
    df = pd.read_csv('B.csv')

    # Normalize dtypes so downstream arithmetic and grouping are consistent
    df['门诊收入OBS_T01_MZSR68'] = df['门诊收入OBS_T01_MZSR68'].astype('float32')
    df['当日病房收入'] = df['当日病房收入'].astype('float32')
    df['药品总收入'] = df['药品总收入'].astype('float32')
    df['门诊患者人次数'] = df['门诊患者人次数'].astype('int32')
    df['当日病房收入对应科室'] = df['当日病房收入对应科室'].astype(str)

    # Per-department share of total ward income, in percent.
    # (The original declared a `specific_departments` list here but never
    # used it — removed as dead code; filtering happens in preprocess_data.)
    total_income = df['当日病房收入'].sum()
    income_by_department = df.groupby('当日病房收入对应科室')['当日病房收入'].sum()
    percentage_by_department = (income_by_department / total_income) * 100

    # Pie chart of the department shares; label each wedge with its
    # department name so the chart is readable without a legend.
    plt.figure(figsize=(10, 8))
    plt.pie(percentage_by_department,
            labels=percentage_by_department.index,
            autopct='%1.1f%%', startangle=140)
    plt.title('当日病房收入对应科室类别占比')
    plt.show()

    return df

# Clean and feature-engineer the rehabilitation-ward subset
def preprocess_data(df):
    """Filter *df* to the three rehabilitation wards, add date features,
    drop rule-based anomalies, and remove IQR outliers from the derived
    '处理后门诊数据' column.

    Side effects: prints each anomaly rule's matching rows, writes the
    rejected rows to '异常值.xlsx', and shows a boxplot of the derived column.

    Returns:
        The cleaned DataFrame.
    """
    # Keep only the three wards, concatenated in this fixed order so the
    # resulting row order stays reproducible downstream. .copy() makes
    # df_part an independent frame, so the column assignments below do not
    # trigger SettingWithCopyWarning (the original mutated a slice).
    df_part = pd.concat([df[df['当日病房收入对应科室'] == '康复医学科一病房'].drop_duplicates(),
                         df[df['当日病房收入对应科室'] == '康复医学科二病房'].drop_duplicates(),
                         df[df['当日病房收入对应科室'] == '康复医学科三病房'].drop_duplicates()]).copy()

    # Date-derived features; unparseable dates become NaT
    df_part['日期'] = pd.to_datetime(df_part['日期'], errors='coerce')
    df_part['年'] = df_part['日期'].dt.year
    df_part['月份'] = df_part['日期'].dt.month
    df_part['季节'] = df_part['日期'].dt.month.map(get_season)
    seasons = {'春季': 1, '夏季': 2, '秋季': 3, '冬季': 4}
    df_part['季节'] = df_part['季节'].map(seasons)

    # Five anomaly rules: non-positive incomes / visit counts, and
    # outpatient income not exceeding drug income
    cond1 = df_part['当日病房收入'] <= 0
    cond2 = df_part['门诊收入OBS_T01_MZSR68'] <= 0
    cond3 = df_part['门诊患者人次数'] <= 0
    cond4 = df_part['药品总收入'] <= 0
    cond5 = df_part['门诊收入OBS_T01_MZSR68'] <= df_part['药品总收入']

    # Log each rule's hits (same output as before, without re-filtering
    # the frame with duplicated boolean expressions)
    for cond in (cond1, cond2, cond3, cond4, cond5):
        print(df_part[cond])

    # Persist every row that violates at least one rule
    out = df_part[cond1 | cond2 | cond3 | cond4 | cond5]
    out.to_excel('异常值.xlsx', index=False)

    # Keep rows passing all rules. NOTE: '药品总收入 >= 0' deliberately
    # keeps zero drug income, matching the original filter (rule cond4
    # above only reports them).
    df_part = df_part[(df_part['当日病房收入'] > 0)
                      & (df_part['门诊收入OBS_T01_MZSR68'] > 0)
                      & (df_part['门诊患者人次数'] > 0)
                      & (df_part['药品总收入'] >= 0)]
    df_part = df_part[df_part['门诊收入OBS_T01_MZSR68'] > df_part['药品总收入']]

    # Drop rows with missing outpatient income (reassignment instead of
    # inplace=True on a slice, which pandas warns about)
    df_part = df_part.dropna(subset=['门诊收入OBS_T01_MZSR68'])

    # Derived target: outpatient income net of ward and drug income
    df_part['处理后门诊数据'] = (df_part['门诊收入OBS_T01_MZSR68']
                           - df_part['当日病房收入']
                           - df_part['药品总收入'])

    # 1.5*IQR fences for the derived column
    Q1 = df_part['处理后门诊数据'].quantile(0.25)
    Q3 = df_part['处理后门诊数据'].quantile(0.75)
    IQR = Q3 - Q1
    k = 1.5  # conventional IQR multiplier
    lower_bound = Q1 - k * IQR
    upper_bound = Q3 + k * IQR

    # Boxplot with outliers marked. A horizontal seaborn boxplot is drawn
    # at y=0, so the stars are plotted at y=0 — the original used y=1,
    # which placed the markers away from the box.
    sns.set(style="whitegrid")
    plt.figure(figsize=(10, 6))
    sns.boxplot(x=df_part['处理后门诊数据'])
    outliers = df_part[(df_part['处理后门诊数据'] < lower_bound)
                       | (df_part['处理后门诊数据'] > upper_bound)]
    for outlier in outliers['处理后门诊数据']:
        plt.plot(outlier, 0, 'r*')  # red star on each outlier
    plt.title('箱线图 - 处理后门诊数据')
    plt.show()

    # Remove the IQR outliers
    df_part = df_part[(df_part['处理后门诊数据'] >= lower_bound)
                      & (df_part['处理后门诊数据'] <= upper_bound)]

    return df_part


# Build and compile a two-layer stacked GRU regressor
def build_gru_model(input_shape, units=1500):
    """Create a stacked-GRU network ending in a single linear output.

    Args:
        input_shape: (timesteps, features) tuple for the first GRU layer.
        units: width of the first GRU layer (second layer is fixed at 1000).

    Returns:
        The compiled keras Sequential model.

    NOTE(review): 'accuracy' is not a meaningful metric for an MSE
    regression, but it is kept so evaluate() output keeps its existing
    shape for callers — confirm before removing.
    """
    layers = [
        GRU(units, return_sequences=True, input_shape=input_shape),
        GRU(1000),
        Dense(1),
    ]
    model = Sequential(layers)
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    return model
    
# Fit the GRU with early stopping on validation loss
def train_gru_model(model, X_train, y_train, validation_split=0.1, epochs=100, batch_size=32):
    """Train *model* on (X_train, y_train), stopping early on val_loss.

    The best weights seen on the validation split are restored before
    returning. The fit history is discarded; the fitted model is returned.
    """
    stopper = EarlyStopping(
        monitor='val_loss',
        patience=10,
        restore_best_weights=True,
        verbose=1,
    )
    model.fit(
        X_train,
        y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_split=validation_split,
        verbose=1,
        callbacks=[stopper],
        shuffle=False,  # preserve temporal order within the training data
    )
    return model

# Grid-search two tree ensembles and return the best one
def model_selection_and_evaluation(X_train, y_train, X_test, y_test):
    """Grid-search RandomForest and GradientBoosting regressors (each inside
    a StandardScaler pipeline) with 10-fold CV, then return the fitted
    pipeline with the lowest MSE on (X_test, y_test).

    Args:
        X_train, y_train: training features and target.
        X_test, y_test: held-out data used only for the final model choice.

    Returns:
        The best fitted Pipeline (scaler + regressor).
    """
    kf = KFold(n_splits=10)

    # Candidate models
    models = {
        'RandomForest': RandomForestRegressor(random_state=42),
        'GradientBoosting': GradientBoostingRegressor(random_state=42)
    }

    param_grid = {
        'RandomForest': {
            'regressor__n_estimators': [100, 200, 300],
            'regressor__max_depth': [10, 20, 30]
        },
        'GradientBoosting': {
            'regressor__n_estimators': [100, 200, 300],
            'regressor__learning_rate': [0.01, 0.1, 0.2],
            'regressor__max_depth': [3, 5, 7]
        }
    }

    best_models = {}
    val_mse = {}  # held-out MSE per model, computed once
    for name, model in models.items():
        pipeline = Pipeline([
            ('scaler', StandardScaler()),
            ('regressor', model)
        ])
        grid_search = GridSearchCV(pipeline, param_grid[name], cv=kf,
                                   scoring='neg_mean_squared_error')
        grid_search.fit(X_train, y_train)
        best_models[name] = grid_search.best_estimator_
        # The original also computed a training-set MSE and discarded it;
        # only the held-out MSE is needed for model selection.
        val_mse[name] = mean_squared_error(y_test, best_models[name].predict(X_test))

    # Pick the model with the smallest held-out MSE (no re-prediction)
    best_model_name = min(val_mse, key=val_mse.get)
    return best_models[best_model_name]
    
# Entry point: load, clean, train both models, ensemble, evaluate, persist
def main():
    """End-to-end pipeline: load and clean the data, train a GRU and a
    grid-searched tree ensemble, combine their predictions with
    training-R^2 weights, report regression/classification metrics, and
    persist the models and scalers to disk."""
    # Load and preprocess
    df = load_data()
    df_part = preprocess_data(df)
    df_part['日期数'] = (df_part['日期'] - df_part['日期'].min()) / np.timedelta64(1, 'D')
    df_part['星期几'] = df_part['日期'].dt.dayofweek

    # Missing-value report -> Excel
    total = df_part.isnull().sum().sort_values(ascending=False)
    percent = (df_part.isnull().sum() / df_part.isnull().count()).sort_values(ascending=False)
    missing_data = pd.concat([total, percent], axis=1, keys=['total', 'percent'])
    missing_data['name'] = missing_data.index
    missing_data.to_excel('缺失值.xlsx')

    # Feature columns and targets for the two models
    features1 = ['门诊患者人次数', '药品总收入', '当日病房收入', '年', '季节']
    features2 = ['日期数', '门诊患者人次数', '星期几', '月份', '年']
    target1 = '门诊收入OBS_T01_MZSR68'
    target2 = '处理后门诊数据'

    # Scale the GRU inputs and target to [0, 1]
    scaler_x1 = MinMaxScaler()
    scaler_y1 = MinMaxScaler()
    X1 = scaler_x1.fit_transform(df_part[features1])
    y1 = scaler_y1.fit_transform(df_part[target1].values.reshape(-1, 1))
    X2 = df_part[features2]
    y2 = df_part[target2]

    # Train/test splits (fixed seed for reproducibility)
    X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=0.2, random_state=42)
    X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.2, random_state=42)

    # GRU expects (samples, timesteps, features); use a single timestep
    X1_train = np.reshape(X1_train, (X1_train.shape[0], 1, X1_train.shape[1]))
    X1_test = np.reshape(X1_test, (X1_test.shape[0], 1, X1_test.shape[1]))

    # Train the GRU and predict over the full dataset (inverse-transformed
    # back to the original income scale)
    gru_model = build_gru_model((1, X1_train.shape[2]))
    gru_model = train_gru_model(gru_model, X1_train, y1_train.ravel())
    X_new = scaler_x1.transform(df_part[features1])
    X_new = np.reshape(X_new, (X_new.shape[0], 1, X_new.shape[1]))
    y_pred_gru = scaler_y1.inverse_transform(gru_model.predict(X_new)).ravel()

    # Tree-ensemble model: predict the derived target, then reconstruct the
    # original outpatient income by adding back ward and drug income
    best_model = model_selection_and_evaluation(X2_train, y2_train, X2_test, y2_test)
    df_part['预测的处理后门诊数据'] = best_model.predict(X2)
    y_pred_best_model = df_part['预测的处理后门诊数据'] + df_part['当日病房收入'] + df_part['药品总收入']

    # Ensemble weights from each model's R^2 on its own training split.
    # BUGFIX: the original used evaluate(...)[1] ('accuracy', which is
    # meaningless for a regression loss) and called
    # best_model.score(..., scoring='accuracy'), which raises TypeError —
    # sklearn's score() accepts no 'scoring' argument and returns R^2.
    score_gru = r2_score(y1_train.ravel(), gru_model.predict(X1_train).ravel())
    score_best = best_model.score(X2_train, y2_train)  # R^2
    scores = [max(score_gru, 0.0), max(score_best, 0.0)]  # clip negative R^2
    total_score = sum(scores)
    if total_score > 0:
        weights = [s / total_score for s in scores]
    else:
        weights = [0.5, 0.5]  # both models useless: fall back to equal weights

    # Weighted ensemble of the two predictions
    ensemble_pred = weights[0] * y_pred_gru + weights[1] * y_pred_best_model

    # Regression quality of the ensemble
    ensemble_r2 = r2_score(df_part[target1], ensemble_pred)
    print(f'集成模型R^2: {ensemble_r2:.4f}')

    # Binarize around the median to report classification-style metrics.
    # BUGFIX: threshold the ENSEMBLE prediction — the original thresholded
    # the GRU-only prediction while labelling the metrics as 'ensemble'.
    threshold = df_part[target1].median()
    y_binary = (df_part[target1] > threshold).astype(int)
    y_pred_ensemble_binary = (ensemble_pred > threshold).astype(int)

    accuracy = accuracy_score(y_binary, y_pred_ensemble_binary)
    f1 = f1_score(y_binary, y_pred_ensemble_binary)
    # AUC from the continuous scores, not the hard 0/1 labels
    auc = roc_auc_score(y_binary, ensemble_pred)

    print(f'集成模型准确率: {accuracy:.4f}')
    print(f'集成模型F1-score: {f1:.4f}')
    print(f'集成模型AUC: {auc:.4f}')

    # ROC curve of the ensemble scores
    fpr, tpr, _ = roc_curve(y_binary, ensemble_pred)
    plt.figure()
    plt.plot(fpr, tpr, label='AUC = {:.4f}'.format(auc))
    plt.plot([0, 1], [0, 1], 'k--')  # chance line
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc='lower right')
    plt.show()

    # Actual vs ensemble prediction over the row index
    plt.figure(figsize=(12, 6))
    sns.lineplot(x=df_part.index, y=df_part[target1], label='Actual Data', marker='o')
    sns.lineplot(x=df_part.index, y=ensemble_pred, label='GRU Model Prediction', marker='x')
    plt.title('实际数据 Vs 集成模型预测数据')
    plt.xlabel('时间')
    plt.ylabel('收入')
    plt.legend()
    plt.show()

    # Persist models and scalers.
    # BUGFIX: keras models are not reliably picklable, so the GRU is saved
    # with the native keras format instead of joblib.
    joblib.dump(best_model, 'best_model.joblib', protocol=4)
    gru_model.save('gru_model.h5')
    with open('scaler_x.pkl', 'wb') as f:
        joblib.dump(scaler_x1, f)
    with open('scaler_y.pkl', 'wb') as f:
        joblib.dump(scaler_y1, f)
 
# Run the full pipeline only when executed as a script
if __name__ == "__main__":
    main()
