﻿﻿import pandas as pd

import matplotlib.pyplot as plt

import numpy as np

from pandas.plotting import register_matplotlib_converters

from adtk.detector import QuantileAD, OutlierDetector, SeasonalAD, PersistAD, LevelShiftAD, VolatilityShiftAD, AutoregressionAD, MinClusterDetector, RegressionAD, PcaAD, ThresholdAD

from adtk.aggregator import AndAggregator, OrAggregator

from adtk.pipe import Pipenet, Pipeline

from adtk.transformer import RollingAggregate, DoubleRollingAggregate, ClassicSeasonalDecomposition

from sklearn.cluster import KMeans

from sklearn.neighbors import LocalOutlierFactor

from sklearn.linear_model import LinearRegression, Ridge, Lasso

from sklearn.ensemble import IsolationForest, RandomForestRegressor, GradientBoostingRegressor

from statsmodels.tsa.seasonal import seasonal_decompose

from sklearn.metrics import confusion_matrix

from pandas.tseries.offsets import DateOffset



# Accumulators for per-(gid, os) confusion-matrix counts; they are summed
# into overall accuracy/precision/recall/F1 at the end of the script.
tps_list, fps_list, tns_list, fns_list = [], [], [], []  # TP / FP / TN / FN



# Data preprocessing: register pandas<->matplotlib datetime converters so the
# (currently commented-out) plots render time indexes correctly.
register_matplotlib_converters()
# Hourly per-game conversion data with hand-labelled anomalies.
# Assumes columns include: dt, hour, gid, os, conversion_rate_2_3,
# conversion_rate_2_3_label -- TODO confirm against the CSV schema.
data = pd.read_csv("anomaly_detection/data/sample_data_hour_with_label_final.csv")
# Daily per-game statistics; presumably one row per (dt, gid, os) carrying
# 'conversion_2_rolling_3days', used below to mask low-volume days -- verify.
data_game_stats = pd.read_csv("anomaly_detection/data/game_statinfo.csv")

# Target column and datetime handling: combine the 'dt' date (Y/m/d) with the
# integer 'hour' column into a full hourly timestamp, then index by it.
target_col = 'conversion_rate_2_3'
data['dt'] = pd.to_datetime(data['dt'], format='%Y/%m/%d') + pd.to_timedelta(data['hour'], unit='H')
data.set_index('dt', inplace=True)
# Normalise the label to boolean: 1 -> True (anomaly), anything else -> False.
data['conversion_rate_2_3_label'] = data['conversion_rate_2_3_label']  == 1
# Stats file is daily; no explicit format given, so pandas infers it -- assumes
# an unambiguous date format in the CSV (TODO confirm).
data_game_stats['dt'] = pd.to_datetime(data_game_stats['dt'])
data_game_stats.set_index('dt', inplace=True)



# Hourly range spanned by the data index.
# NOTE: all_hours is not referenced anywhere later in this script.
start_date, end_date = data.index.min().date(), data.index.max().date()
all_hours = pd.date_range(start=start_date, end=end_date, freq='H')

# Every (game id, OS) combination is analysed independently by the main loop.
gids, oss = data['gid'].unique(), data['os'].unique()



for gid in gids:
    for os_type in oss:
        # Filter rows for this (game id, OS) pair and prepare the series.
        filtered_data = data[(data['gid'] == gid)& (data['os'] == os_type)]
        filtered_data= filtered_data.sort_values(by='conversion_rate_2_3_label', ascending=True)
        data_filtered_game_stats = data_game_stats[(data_game_stats['gid']==gid)&(data_game_stats['os']==os_type)]

        # Days whose 3-day rolling conversion volume is low (< 340): their
        # hours are later forced to "not anomalous", since the rate is too
        # noisy to trust at low volume.
        low_conversion_dates = data_filtered_game_stats[data_filtered_game_stats['conversion_2_rolling_3days'] < 340].index  # dates with low conversion volume
        # Skip combinations with no signal at all.
        if filtered_data[target_col].sum() == 0:
            continue
        series_to_validate = filtered_data[target_col]
        series_to_validate = series_to_validate.to_frame()
        # Regularise to a strict hourly grid (introduces NaN for missing hours).
        series_to_validate = series_to_validate.resample('1H').mean()
        if series_to_validate[target_col].sum() == 0:
            continue
        # Fill gaps with the column mean, then z-normalise.
        series_to_validate.fillna(series_to_validate.mean(), inplace=True)
        series_to_validate = (series_to_validate-np.mean(series_to_validate))/np.std(series_to_validate)

        # Classic (moving-average) seasonal decomposition; only strip the
        # seasonal component when it carries meaningful variance (>= 0.5 on the
        # z-normalised scale).
        # NOTE(review): series_to_validate is a one-column DataFrame, so
        # .var() may yield a length-1 Series whose truthiness raises in
        # pandas -- confirm this branch actually executes as intended.
        decomposition = seasonal_decompose(series_to_validate, model='additive')
        s_transformed = series_to_validate
        seasonal_component = decomposition.seasonal
        if seasonal_component.var() >= 0.5:
            s_transformed = ClassicSeasonalDecomposition().fit_transform(s_transformed)

        # Model construction.
        # Pipenet 1: rolling-variance and rolling-std branches, each requiring
        # agreement (AND) between a quantile detector and an outlier detector;
        # plus two point detectors on the original series; the two families are
        # OR-ed at the end.
        steps1 = {
            "VarianceRollingWindow": {  # 6-hour rolling-variance window
                "model": RollingAggregate(agg="var", window=6),
                "input": "original"
            },
            "QuantileADOnVariance": {  # quantile detector on the variance (near-0/1 quantiles)
                "model": QuantileAD(high=0.9999999999999999, low=0.00000000000000001),
                "input": "VarianceRollingWindow"
            },
            "IsolationForestVariance": {  # NOTE(review): key says IsolationForest but wraps LOF
                "model": OutlierDetector(LocalOutlierFactor(n_neighbors=5)),
                "input": "VarianceRollingWindow"
            },
            "CombinedVarianceAnomalies": {  # both variance detectors must agree
                "model": AndAggregator(),
                "input": ["QuantileADOnVariance", "IsolationForestVariance"]
            },
            "StdDeviationRollingWindow": {  # 8-hour rolling-std window
                "model": RollingAggregate(agg="std", window=8),
                "input": "original"
            },
            "QuantileADOnStdDev": {  # quantile detector on the rolling std
                "model": QuantileAD(high=0.99, low=0.01),
                "input": "StdDeviationRollingWindow"
            },
            "IsolationForestStdDev": {  # Isolation Forest on the rolling std
                "model": OutlierDetector(IsolationForest(contamination=0.1)),
                "input": "StdDeviationRollingWindow"
            },
            "CombinedStdDevAnomalies": {  # both std detectors must agree
                "model": AndAggregator(),
                "input": ["QuantileADOnStdDev", "IsolationForestStdDev"]
            },
            "CombineAllStatisticalAnomalies": {  # variance AND std families must both fire
                "model": AndAggregator(),
                "input": ["CombinedVarianceAnomalies", "CombinedStdDevAnomalies"]
            },
            "LOFOnOriginalData": {  # NOTE(review): key says LOF but wraps IsolationForest
                "model": OutlierDetector(IsolationForest(contamination=0.02)),
                "input": "original"
            },
            "KMeansClusterOnOriginal": {  # smallest K-means cluster flagged as anomalous
                "model": MinClusterDetector(KMeans(n_clusters=4)),
                "input": "original"
            },
            "CombinedOriginalDataAnomalies": {  # both original-data detectors must agree
                "model": AndAggregator(),
                "input": ["LOFOnOriginalData", "KMeansClusterOnOriginal"]
            },
            "FinalCombinedAnomalies": {  # OR of the original-data and statistical families
                "model": OrAggregator(),
                "input": ["CombinedOriginalDataAnomalies", "CombineAllStatisticalAnomalies"]
            },
        }
        # Pipenet 2: two autoregressive detectors plus a quantile detector with
        # a raised low threshold; all three must agree (AND).
        steps2 = {
            "RandomForestAutoregressionAD": {  # autoregressive AD with a random-forest regressor
                "model": AutoregressionAD(regressor=RandomForestRegressor(n_estimators=20)),
                "input": "original"
            },
            "GradientBoostingAutoregressionAD": {  # autoregressive AD with a gradient-boosting regressor
                "model": AutoregressionAD(regressor=GradientBoostingRegressor(n_estimators=20)),
                "input": "original"
            },
            "QuantileADStdAdjusted": {  # quantile detector, low threshold raised to 0.11
                "model": QuantileAD(high=0.9999999999999999, low=0.11),
                "input": "original"
            },
            "CombinedAutoregressiveModels": {  # all three detectors must agree
                "model": AndAggregator(),
                "input": ["RandomForestAutoregressionAD", "GradientBoostingAutoregressionAD", "QuantileADStdAdjusted"]
            },
        }

        pipenet1 = Pipenet(steps1)
        pipenet2 = Pipenet(steps2)

        # Anomaly detection: pipenet1 runs on the (possibly de-seasonalised)
        # series, pipenet2 on the normalised series; an hour is anomalous when
        # either pipeline fires.
        anomalies1 = pipenet1.fit_detect(s_transformed) ==1
        anomalies2 = pipenet2.fit_detect(series_to_validate) ==1
        anomalies = (anomalies2|anomalies1)

        # Exclude low-conversion-volume days: force all 24 hours of each such
        # day to False.
        # NOTE(review): the mask is intersected with filtered_data.index (raw
        # hours), while anomalies is indexed on the resampled hourly grid --
        # hours introduced only by resampling are never masked; confirm intent.
        false_hours = pd.DatetimeIndex([])
        for low_date in low_conversion_dates:
            false_hours = false_hours.union(pd.date_range(start=low_date, periods=24, freq='H'))
        false_hours = false_hours.intersection(filtered_data.index)
        anomalies.loc[false_hours] = False

        # Drop isolated anomaly points: a flagged hour is treated as isolated
        # (and cleared) when, for each of three symmetric neighbour-pair tests
        # ((±5h or ±1h) and (±4h or ±3h) and (±6h or ±2h)), neither neighbour
        # of at least one pair fired. The very first/last index is always
        # treated as isolated.
        # NOTE(review): for indices within 6 hours of either end (other than
        # the exact first/last), idx ± DateOffset can fall outside the index
        # and raise a KeyError -- confirm boundary handling.
        isolated_anomalies = []
        for idx in anomalies[anomalies].index:
            if (idx == anomalies.index[0] or idx == anomalies.index[-1]) or \
            (((not anomalies[idx + pd.DateOffset(hours=5)]
                and not anomalies[idx - pd.DateOffset(hours=5)])or(not anomalies[idx + pd.DateOffset(hours=1)]
                and not anomalies[idx - pd.DateOffset(hours=1)]))and
                ((not anomalies[idx + pd.DateOffset(hours=4)]
                and not anomalies[idx - pd.DateOffset(hours=4)])or(not anomalies[idx + pd.DateOffset(hours=3)]
                and not anomalies[idx - pd.DateOffset(hours=3)]))
                and
                ((not anomalies[idx + pd.DateOffset(hours=6)]
                and not anomalies[idx - pd.DateOffset(hours=6)])or(not anomalies[idx + pd.DateOffset(hours=2)]
                and not anomalies[idx - pd.DateOffset(hours=2)]))):
                isolated_anomalies.append(idx)
        for idx in isolated_anomalies:
            anomalies.loc[idx] = False

        # Confusion matrix vs. the hand labels, with predictions re-aligned to
        # the raw (unresampled) label index; unmatched hours count as False.
        true_labels = filtered_data['conversion_rate_2_3_label']
        anomalies_aligned = anomalies.reindex(true_labels.index, fill_value=False)
        cm = confusion_matrix(true_labels, anomalies_aligned, labels=[False, True])
        tps = cm[1, 1]  # true positives
        fps = cm[0, 1]  # false positives
        tns = cm[0, 0]  # true negatives
        fns = cm[1, 0]  # false negatives

        tps_list.append(tps)
        fps_list.append(fps)
        tns_list.append(tns)
        fns_list.append(fns)

        # Disabled plotting block kept verbatim below (it is a no-op string
        # expression, not a comment).
        '''

        #visualization

        plt.plot(series_to_validate)

        #plt.plot(s_transformed, label='Data')

        anomaly_indices = anomalies_aligned.index[anomalies_aligned]

        anomaly_values = series_to_validate.loc[anomaly_indices]

        plt.scatter(anomaly_indices, anomaly_values, color='red', label='Anomaly')

        #plt.scatter(true_labels.index[true_labels], series_to_validate[true_labels], color='purple', label='Anomaly')

        plt.legend()

        plt.title(" Anomaly Detection")

        plt.xlabel('Date')

        plt.ylabel('Normalized-Conversion')

        plt.tight_layout() 

        plt.show()

        '''

        

# Aggregate the per-(gid, os) confusion-matrix counts into overall totals.
total_tps = sum(tps_list)
total_fps = sum(fps_list)
total_tns = sum(tns_list)
total_fns = sum(fns_list)

# Performance summary.
# The raw counts are printed unconditionally so they are still reported even
# when a ratio below cannot be computed (previously they sat inside the try
# *after* the divisions and were lost on ZeroDivisionError).
print(f"tp: {total_tps}")
print(f"fp: {total_fps}")
print(f"tn: {total_tns}")
print(f"fn: {total_fns}")

try:
    total_accuracy = (total_tps + total_tns) / (total_tps + total_fps + total_tns + total_fns)
    total_precision = total_tps / (total_tps + total_fps)
    total_recall = total_tps / (total_tps + total_fns)
    # F1 is the harmonic mean of precision and recall.
    f1 = (total_precision*2*total_recall)/(total_precision+total_recall)

    print(f"Total Accuracy: {total_accuracy:.9f}")
    print(f"Total Precision: {total_precision:.9f}")
    print(f"Total Recall: {total_recall:.9f}")
    print(f"f1: {f1:.9f}")
except ZeroDivisionError:
    # No positive predictions / labels (or no data at all) makes a denominator
    # zero; report it instead of crashing.
    print("Error calculating metrics due to division by zero.")