# # model_std_3in1.py  (ticket number recorded in this header)
# # Big Data - User Profiling - 18 - JD review sentiment prediction
# # ---------------------------------------------------------
# # Model 1: logistic regression (hand-rolled SGD)
# # Model 2: gradient-boosted trees (hand-rolled CART)
# # Model 3: Naive Bayes (hand-rolled multinomial)
# # ---------------------------------------------------------
# import csv, math, random, joblib, glob, os
# from collections import Counter, defaultdict
#
# from sklearn.metrics import f1_score
#
#
# # ---------- ① 逻辑回归 ----------
# class LogisticRegressionSTD:
#     def __init__(self, lr=0.1, epochs=20):
#         self.lr = lr
#         self.epochs = epochs
#         self.coef = None
#
#     def fit(self, X, y):
#         n = len(X[0])
#         self.coef = [0.0] * (n + 1)
#         for epoch in range(self.epochs):
#             for row, label in zip(X, y):
#                 z = self.coef[0] + sum(c * v for c, v in zip(self.coef[1:], row))
#                 p = 1.0 / (1.0 + math.exp(-z))
#                 err = label - p
#                 self.coef[0] += self.lr * err
#                 for i in range(len(row)):
#                     self.coef[i + 1] += self.lr * err * row[i]
#
#     def predict(self, X):
#         return [1 if (self.coef[0] + sum(c * v for c, v in zip(self.coef[1:], row))) >= 0 else 0 for row in X]
#
#
# # ---------- ② 梯度提升树（CART） ----------
# class CARTNode:
#     def __init__(self, gini, num_samples, num_classes, value, feature_idx=None, threshold=None, left=None, right=None):
#         self.gini = gini
#         self.num_samples = num_samples
#         self.num_classes = num_classes
#         self.value = value
#         self.feature_idx = feature_idx
#         self.threshold = threshold
#         self.left = left
#         self.right = right
#
#
# class GradientBoostingSTD:
#     def __init__(self, n_estimators=100, max_depth=3, learning_rate=0.1):
#         self.n_estimators = n_estimators
#         self.max_depth = max_depth
#         self.learning_rate = learning_rate
#         self.trees = []
#
#     def _gini(self, y):
#         m = len(y)
#         if m == 0:
#             return 0.0
#         counts = Counter(y)
#         return 1.0 - sum((c / m) ** 2 for c in counts.values())
#
#     def _best_split(self, X, y):
#         m, n = len(y), len(X[0])
#         best_gini, best_idx, best_thr = 1.0, None, None
#         for feature_idx in range(n):
#             thresholds = sorted(set(row[feature_idx] for row in X))
#             for thr in thresholds:
#                 left_y = [y[i] for i in range(m) if X[i][feature_idx] <= thr]
#                 right_y = [y[i] for i in range(m) if X[i][feature_idx] > thr]
#                 gini = (len(left_y) / m) * self._gini(left_y) + (len(right_y) / m) * self._gini(right_y)
#                 if gini < best_gini:
#                     best_gini, best_idx, best_thr = gini, feature_idx, thr
#         return best_idx, best_thr
#
#     def _build_tree(self, X, y, depth=0):
#         num_samples_per_class = Counter(y)
#         predicted_class = max(num_samples_per_class, key=num_samples_per_class.get)
#         node = CARTNode(
#             gini=self._gini(y),
#             num_samples=len(y),
#             num_classes=len(num_samples_per_class),
#             value=predicted_class
#         )
#         if depth < self.max_depth:
#             idx, thr = self._best_split(X, y)
#             if idx is not None:
#                 left_X, left_y, right_X, right_y = [], [], [], []
#                 for i in range(len(y)):
#                     if X[i][idx] <= thr:
#                         left_X.append(X[i])
#                         left_y.append(y[i])
#                     else:
#                         right_X.append(X[i])
#                         right_y.append(y[i])
#                 node.feature_idx = idx
#                 node.threshold = thr
#                 node.left = self._build_tree(left_X, left_y, depth + 1)
#                 node.right = self._build_tree(right_X, right_y, depth + 1)
#         return node
#
#     def fit(self, X, y):
#         self.trees = []
#         residual = y[:]
#         for _ in range(self.n_estimators):
#             tree = self._build_tree(X, residual)
#             self.trees.append(tree)
#             # 简单残差 = 真实 - 预测
#             pred = [self._predict_one(tree, row) for row in X]
#             residual = [r - p for r, p in zip(residual, pred)]
#
#     def _predict_one(self, node, row):
#         while node.left or node.right:
#             if row[node.feature_idx] <= node.threshold:
#                 node = node.left
#             else:
#                 node = node.right
#         return node.value
#
#     def predict(self, X):
#         preds = [0] * len(X)
#         for tree in self.trees:
#             for i, row in enumerate(X):
#                 preds[i] += self.learning_rate * self._predict_one(tree, row)
#         return [1 if p >= 0.5 else 0 for p in preds]
#
#
# # ---------- ③ 朴素贝叶斯（多项式） ----------
# class NaiveBayesSTD:
#     def fit(self, X, y):
#         self.class_log_prior_ = {}
#         self.feature_log_prob_ = {}
#         n_samples = len(y)
#         for cls in [0, 1]:
#             cls_count = sum(1 for label in y if label == cls)
#             self.class_log_prior_[cls] = math.log(cls_count / n_samples)
#             X_cls = [row for row, label in zip(X, y) if label == cls]
#             n_features = len(X[0])
#             feature_counts = [0.0] * n_features
#             for row in X_cls:
#                 for i, val in enumerate(row):
#                     feature_counts[i] += val
#             total_count = sum(feature_counts) + n_features  # 拉普拉斯平滑
#             self.feature_log_prob_[cls] = [math.log((count + 1) / total_count) for count in feature_counts]
#
#     def predict(self, X):
#         return [self._predict_row(row) for row in X]
#
#     def _predict_row(self, row):
#         log_prob = {}
#         for cls in [0, 1]:
#             log_prob[cls] = self.class_log_prior_[cls]
#             for i, val in enumerate(row):
#                 log_prob[cls] += val * self.feature_log_prob_[cls][i]
#         return max(log_prob, key=log_prob.get)
#
#
# # ---------- 主流程 ----------
# def main():
#     files = glob.glob(r'E:\pythonProject\Xmkb_qg\train_data\*')
#     print('>>> 文件列表', files)
#     X, y = [], []
#     for f in files:
#         for row in csv.reader(open(f, newline=''), delimiter='\t'):
#             if len(row) != 8:
#                 continue
#             X.append([float(x) for x in row[1:]])
#             y.append(int(row[0]))
#     print('>>> 样本数', len(X))
#
#     # ① LR
#     print(">>> 模型①：LogisticRegression（手推）")
#     lr = LogisticRegressionSTD(lr=0.1, epochs=10)
#     lr.fit(X, y)
#     pred_lr = lr.predict(X)
#     f1_lr = f1_score(y, pred_lr, average='micro')
#     print("LR Micro-F1", f1_lr)
#     joblib.dump(lr, 'lr_std.model')
#
#     # ② GBDT
#     print(">>> 模型②：GradientBoosting（手推 CART）")
#     gbdt = GradientBoostingSTD(n_estimators=100, max_depth=3, learning_rate=0.1)
#     gbdt.fit(X, y)
#     pred_gbdt = gbdt.predict(X)
#     f1_gbdt = f1_score(y, pred_gbdt, average='micro')
#     print("GBDT Micro-F1", f1_gbdt)
#     joblib.dump(gbdt, 'gbdt_std.model')
#
#     # ③ NaiveBayes
#     print(">>> 模型③：NaiveBayes（手推多项式）")
#     nb = NaiveBayesSTD()
#     nb.fit(X, y)
#     pred_nb = nb.predict(X)
#     f1_nb = f1_score(y, pred_nb, average='micro')
#     print("NaiveBayes Micro-F1", f1_nb)
#     joblib.dump(nb, 'nb_std.model')
#
#     print(">>> 3 种标准库模型已保存：lr_std.model / gbdt_std.model / nb_std.model")
#
# if __name__ == '__main__':
#     main()