import mysql.connector
import pandas as pd
from sqlalchemy import create_engine
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
import joblib
import os

# Database connection configuration.
# SECURITY: credentials were hard-coded; allow overriding via environment
# variables. The original values remain as defaults for backward compatibility.
config = {
    'user': os.environ.get('DB_USER', 'root'),
    'password': os.environ.get('DB_PASSWORD', 'root'),
    'host': os.environ.get('DB_HOST', 'localhost'),
    'database': os.environ.get('DB_NAME', 'ticketingsystem')
}

# Create the SQLAlchemy engine shared by all queries in this module.
# (create_engine is lazy: no connection is opened until first use.)
engine = create_engine(
    f"mysql+mysqlconnector://{config['user']}:{config['password']}"
    f"@{config['host']}/{config['database']}"
)

# Query helper: run SQL through the shared engine.
def fetch_data(query):
    """Execute *query* against the module-level engine.

    Returns the result as a pandas DataFrame on success, or None if the
    query fails for any reason (the error is printed, not raised).
    """
    frame = None
    try:
        frame = pd.read_sql(query, engine)
    except Exception as e:
        print(f"Error fetching data: {e}")
    else:
        print("Data fetched successfully")
    return frame

# Data preprocessing and feature engineering.
def preprocess_data(df):
    """Label incidents with rule-based heuristics and build model features.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw incident rows; expected to contain 'id', 'Short_description',
        'Assignment_group', 'Opened_by', 'Project', 'Assigned_to',
        'Opened' and 'Resolved' columns (TODO confirm against the
        `incidents` table schema).

    Returns
    -------
    tuple
        (feature_matrix, fitted TfidfVectorizer, dict mapping column name
        to its fitted LabelEncoder, DataFrame with 'id' and 'label').
    """
    # Missing descriptions would break both the keyword rules and TF-IDF.
    df['Short_description'] = df['Short_description'].fillna("Unknown")

    # Rule-based labeling: 1 = real incident, 0 = false incident.
    def custom_filter_logic(row):
        # 1. Assignment_group: this group only receives false alarms.
        if row['Assignment_group'] == 'Cloud-Administrators-L1':
            return 0

        # 2. Opened_by: anything not opened by the Splunk integration is real.
        if row['Opened_by'] != 'IntegrationSplunk':
            return 1

        # 3. Short_description keyword rules.
        if 'NON-PROD' in row['Short_description']:
            return 0
        elif 'LogicalDisk : %' in row['Short_description'] or 'Memory : %' in row['Short_description']:
            return 1

        # 4. Resolved - Opened gap: closed within an hour => likely noise.
        if pd.notnull(row['Opened']) and pd.notnull(row['Resolved']):
            opened = pd.to_datetime(row['Opened'])
            resolved = pd.to_datetime(row['Resolved'])
            time_diff = (resolved - opened).total_seconds() / 3600  # hours
            return 0 if time_diff < 1 else 1

        # Default: treat as a real incident.
        return 1

    # Apply the labeling rules row by row.
    df['label'] = df.apply(custom_filter_logic, axis=1)

    # Text feature vectorization.
    vectorizer = TfidfVectorizer(max_features=100)
    description_features = vectorizer.fit_transform(df['Short_description']).toarray()
    df_vectorized = pd.DataFrame(
        description_features,
        columns=[f"desc_{i}" for i in range(description_features.shape[1])],
        # FIX: reuse df's index so the pd.concat below stays row-aligned even
        # when the input DataFrame does not have a default RangeIndex.
        index=df.index,
    )

    # Categorical feature encoding (used for model training only).
    # BUG FIX: the original re-fit a SINGLE LabelEncoder for all four columns,
    # so the encoder it returned (and that got persisted) only matched
    # 'Assigned_to'. Fit one encoder per column and return them all, keyed by
    # column name, so inference can decode/encode each column correctly.
    label_encoders = {}
    for col in ('Assignment_group', 'Opened_by', 'Project', 'Assigned_to'):
        encoder = LabelEncoder()
        df[f'{col}_encoded'] = encoder.fit_transform(df[col])
        label_encoders[col] = encoder

    # Assemble the final feature matrix: text features + encoded categoricals.
    df_final = pd.concat([
        df_vectorized,
        df[['Assignment_group_encoded', 'Opened_by_encoded', 'Project_encoded', 'Assigned_to_encoded']]
    ], axis=1)

    # Return processed features plus the minimal raw data (id and label only).
    return df_final, vectorizer, label_encoders, df[['id', 'label']]

# Train the decision-tree model.
def train_model():
    """Fetch incident data, train a decision tree and persist all artifacts.

    Side effects: writes model.pkl, vectorizer.pkl, label_encoder.pkl and
    incident_labels.csv under Ticketing-System/server/model.
    """
    query = "SELECT * FROM incidents"
    data = fetch_data(query)
    # ROBUSTNESS: guard against both a failed query (None) and an empty
    # result set — an empty frame would make TfidfVectorizer.fit raise.
    if data is None or data.empty:
        print("Failed to fetch data from the database.")
        return

    # Preprocessing / feature engineering.
    df_final, vectorizer, label_encoder, df_with_label = preprocess_data(data)

    # Features and target.
    X = df_final
    y = df_with_label['label']

    # random_state pinned for reproducible tree construction.
    model = DecisionTreeClassifier(random_state=42)
    model.fit(X, y)

    # Ensure the save directory exists.
    model_dir = 'Ticketing-System/server/model'
    os.makedirs(model_dir, exist_ok=True)

    # Persist the model plus the exact preprocessing objects needed at
    # inference time, joining paths portably instead of via f-strings.
    joblib.dump(model, os.path.join(model_dir, 'model.pkl'))
    joblib.dump(vectorizer, os.path.join(model_dir, 'vectorizer.pkl'))
    joblib.dump(label_encoder, os.path.join(model_dir, 'label_encoder.pkl'))

    # Save the id/label pairs alongside the model artifacts.
    df_with_label.to_csv(os.path.join(model_dir, 'incident_labels.csv'), index=False)

    print("Model trained and saved successfully. Incident labels saved.")

# Script entry point: train and persist the model when run directly.
if __name__ == '__main__':
    train_model()