import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import precision_score, recall_score, f1_score
from imblearn.over_sampling import SMOTE

# Data loading
def read_data():
    """Load the cleaned mobile behavior log and the item-subset table.

    Returns:
        (user_data, item_data) as DataFrames on success, or (None, None)
        when either file is missing or cannot be parsed.
    """
    try:
        # Mobile-side user behavior over the full item catalogue.
        user_data = pd.read_csv('./cleaned_user_data.csv')
        # De-identified item subset.
        item_data = pd.read_csv('./cleaned_item_data.csv')
    except FileNotFoundError:
        print("文件未找到，请检查文件路径和文件名。")
    except pd.errors.ParserError:
        print("文件解析错误，请检查文件格式。")
    else:
        return user_data, item_data
    return None, None

# Data preprocessing
def preprocess_data(user_data, item_data):
    """Restrict behavior rows to the item subset and label-encode categoricals.

    Args:
        user_data: behavior log DataFrame (must contain 'item_id').
        item_data: item-subset DataFrame (must contain 'item_id').

    Returns:
        (filtered_user_data, item_data, label_encoders) where label_encoders
        maps each categorical feature name to its fitted LabelEncoder.
    """
    # Keep only behavior rows whose item belongs to the item subset P;
    # .copy() avoids SettingWithCopy issues on later assignments.
    filtered_user_data = user_data[user_data['item_id'].isin(item_data['item_id'])].copy()

    # Fill missing values with a sentinel token.
    filtered_user_data = filtered_user_data.fillna('unknown')
    item_data = item_data.fillna('unknown')

    label_encoders = {}
    categorical_features = ['user_id', 'item_id', 'user_geohash', 'item_category', 'item_geohash']
    for feature in categorical_features:
        le = LabelEncoder()
        # Bug fix: the original called fit_transform separately on each frame
        # with the same encoder, so a value present in both frames (notably
        # item_id) could receive two different integer codes, and the stored
        # encoder only reflected the last fit.  Fit once on the union of both
        # frames' values, then transform each frame consistently.
        columns = []
        if feature in filtered_user_data.columns:
            columns.append(filtered_user_data[feature])
        if feature in item_data.columns:
            columns.append(item_data[feature])
        if columns:
            le.fit(pd.concat(columns, ignore_index=True))
            if feature in filtered_user_data.columns:
                filtered_user_data.loc[:, feature] = le.transform(filtered_user_data[feature])
            if feature in item_data.columns:
                item_data.loc[:, feature] = le.transform(item_data[feature])
        # Stored even when the feature is absent, matching the original contract.
        label_encoders[feature] = le

    return filtered_user_data, item_data, label_encoders

# Feature engineering
def feature_engineering(filtered_user_data):
    """Build per-(user, item) behavior-count features and the purchase label.

    behavior_type codes: 1=view, 2=collect, 3=add-to-cart, 4=buy.

    Returns:
        DataFrame with columns user_id, item_id, view_count, collect_count,
        cart_count, buy_count and the binary target is_buy.
    """
    # One count column per behavior type, merged outer so pairs that only
    # performed some behaviors are kept (missing counts become 0 below).
    count_columns = {1: 'view_count', 2: 'collect_count', 3: 'cart_count', 4: 'buy_count'}

    features = None
    for behavior_code, column_name in count_columns.items():
        subset = filtered_user_data[filtered_user_data['behavior_type'] == behavior_code]
        counts = subset.groupby(['user_id', 'item_id']).size().reset_index(name=column_name)
        if features is None:
            features = counts
        else:
            features = pd.merge(features, counts, on=['user_id', 'item_id'], how='outer')

    features.fillna(0, inplace=True)

    # Target: did the user buy this item at least once?
    features['is_buy'] = (features['buy_count'] > 0).astype(int)

    return features

# Model training and evaluation
def train_and_evaluate(features):
    """Train three classifiers on the behavior features and report metrics.

    Args:
        features: DataFrame from feature_engineering (must contain user_id,
            item_id, buy_count and the binary target is_buy).

    Returns:
        dict mapping model name -> {'precision', 'recall', 'f1'}, or an
        empty dict when the target has fewer than two classes.
    """
    X = features.drop(['user_id', 'item_id', 'buy_count', 'is_buy'], axis=1)
    y = features['is_buy']

    # Inspect class balance before resampling.
    unique_classes, counts = np.unique(y, return_counts=True)
    print("目标变量 'y' 的分布：", dict(zip(unique_classes, counts)))

    if len(unique_classes) < 2:
        print("目标变量 'y' 只有一个类别，无法进行模型训练。")
        return {}

    # SMOTE oversampling to balance the (typically rare) buy class.
    smote = SMOTE(random_state=42)
    X_resampled, y_resampled = smote.fit_resample(X, y)

    X_train, X_test, y_train, y_test = train_test_split(
        X_resampled, y_resampled, test_size=0.2, random_state=42)

    # Fix: seed the stochastic models so results are reproducible (SMOTE and
    # the split were already seeded, but the models were not), and raise
    # max_iter so LogisticRegression converges — the sklearn default of 100
    # iterations frequently ends in a ConvergenceWarning on data like this.
    models = {
        'RandomForest': RandomForestClassifier(random_state=42),
        'LogisticRegression': LogisticRegression(random_state=42, max_iter=1000),
        'KNeighbors': KNeighborsClassifier(),
    }

    results = {}
    for model_name, model in models.items():
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        results[model_name] = {
            'precision': precision_score(y_test, y_pred),
            'recall': recall_score(y_test, y_pred),
            'f1': f1_score(y_test, y_pred),
        }

    return results

# Prediction generation
def generate_predictions(features, model):
    """Return the (user_id, item_id) pairs the model predicts as purchases.

    Args:
        features: DataFrame from feature_engineering.
        model: fitted classifier exposing predict().

    Returns:
        DataFrame with columns user_id and item_id for predicted buys.
    """
    feature_matrix = features.drop(['user_id', 'item_id', 'buy_count', 'is_buy'], axis=1)
    predicted = model.predict(feature_matrix)
    # Keep only the pairs classified as a purchase (label 1).
    pairs = features[['user_id', 'item_id']]
    return pairs[predicted == 1]

# Main entry point
def main():
    """Run the full pipeline: load, preprocess, build features, train and
    evaluate three models, then retrain the best one and save predictions."""
    user_data, item_data = read_data()
    if user_data is None or item_data is None:
        return

    filtered_user_data, item_data, label_encoders = preprocess_data(user_data, item_data)
    features = feature_engineering(filtered_user_data)
    results = train_and_evaluate(features)

    # Fix: train_and_evaluate returns {} when the target has a single class;
    # the original code then crashed with ValueError at max() on an empty
    # dict (and would have called fit on best_model=None).
    if not results:
        print("No evaluation results available; skipping prediction.")
        return

    for model_name, metrics in results.items():
        print(f'{model_name}:')
        print(f'Precision: {metrics["precision"]}')
        print(f'Recall: {metrics["recall"]}')
        print(f'F1-score: {metrics["f1"]}')
        print()

    # Pick the model family with the best F1 score and retrain a fresh,
    # seeded instance on the full (resampled) data set for prediction.
    best_model_name = max(results, key=lambda k: results[k]['f1'])
    model_factories = {
        'RandomForest': lambda: RandomForestClassifier(random_state=42),
        'LogisticRegression': lambda: LogisticRegression(random_state=42, max_iter=1000),
        'KNeighbors': KNeighborsClassifier,
    }
    best_model = model_factories[best_model_name]()

    X = features.drop(['user_id', 'item_id', 'buy_count', 'is_buy'], axis=1)
    y = features['is_buy']
    # Rebalance only when both classes are present (SMOTE requires >= 2).
    if y.nunique() > 1:
        smote = SMOTE(random_state=42)
        X_resampled, y_resampled = smote.fit_resample(X, y)
    else:
        X_resampled, y_resampled = X, y

    best_model.fit(X_resampled, y_resampled)
    predictions = generate_predictions(features, best_model)

    # Persist the recommended (user_id, item_id) pairs.
    predictions.to_csv('tianchi_mobile_recommendation_predict.csv', index=False)

# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main()