#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : XGBoost-红酒品质分类.py
# @Author: dongguangwen
# @Date  : 2025-02-08 15:53
import pandas as pd
from sklearn.utils import class_weight
from xgboost import XGBClassifier
from sklearn.metrics import classification_report


# Load the pre-split train/test data; every column except the last is a
# feature, the last column is the quality label.
train_data = pd.read_csv('./data/红酒品质分类_train.csv')
test_data = pd.read_csv('./data/红酒品质分类_test.csv')

x_train = train_data.iloc[:, :-1]
y_train = train_data.iloc[:, -1]
x_test = test_data.iloc[:, :-1]
y_test = test_data.iloc[:, -1]

# Handle class imbalance: compute one weight per training sample so that
# minority classes contribute as much as majority classes during training.
# Renamed from `class_weight` — the old name shadowed the imported
# sklearn.utils.class_weight module, and the value is per-sample weights.
sample_weights = class_weight.compute_sample_weight(class_weight='balanced', y=y_train)

model = XGBClassifier(n_estimators=10, objective='multi:softmax', learning_rate=0.3)
# Pass the per-sample weights so the booster optimizes the reweighted loss.
model.fit(x_train, y_train, sample_weight=sample_weights)

y_pred = model.predict(x_test)

# Per-class precision/recall/F1 plus macro/weighted averages.
print(classification_report(y_test, y_pred))

"""
              precision    recall  f1-score   support

           0       0.25      0.50      0.33         2
           1       0.11      0.18      0.14        11
           2       0.68      0.70      0.69       136
           3       0.63      0.51      0.56       128
           4       0.52      0.68      0.59        40
           5       0.00      0.00      0.00         3

    accuracy                           0.59       320
   macro avg       0.37      0.43      0.39       320
weighted avg       0.61      0.59      0.60       320
"""
