# 算法预测.py  （工单编号已写入头部）
# 大数据-用户画像-18-京东评论情感预测
# ---------------------------------------------------------
# 轮次 1：基础特征
# 轮次 2：特征衍生 + 选择
# 轮次 3：超参调优
# ---------------------------------------------------------
import csv, math, random, joblib, glob, os
from sklearn.metrics import f1_score
from collections import Counter
MAX_ROWS = 200          # cap on sampled rows — deliberately tiny for a fast smoke run
ROUND = 3               # number of optimization rounds — NOTE(review): unused; the rounds are hard-coded in main()
RESULTS = []            # per-round results (for the report screenshot) — NOTE(review): unused; main() builds its own list

# ========= 模型①：手推逻辑回归 =========
class LogisticRegressionSTD:
    """Hand-rolled binary logistic regression trained with plain SGD.

    One weight per feature plus an intercept, stored in ``self.coef``
    (``coef[0]`` is the bias).  No regularization and no shuffling:
    samples are visited in input order every epoch, so training is
    fully deterministic.
    """

    def __init__(self, lr=0.1, epochs=10):
        self.lr = lr            # SGD step size
        self.epochs = epochs    # full passes over the training data
        self.coef = None        # [bias, w1, ..., wn] after fit()

    @staticmethod
    def _sigmoid(z):
        """Numerically safe logistic function.

        BUGFIX: ``math.exp(-z)`` raises OverflowError once z < ~-709
        (easy to hit with large raw feature values such as txt_len).
        Clamp the activation first; beyond +/-500 the sigmoid is
        indistinguishable from 0/1 anyway.
        """
        if z < -500.0:
            z = -500.0
        elif z > 500.0:
            z = 500.0
        return 1.0 / (1.0 + math.exp(-z))

    def _score(self, row):
        """Linear part of the model: bias + w . row."""
        return self.coef[0] + sum(c * v for c, v in zip(self.coef[1:], row))

    def fit(self, X, y):
        """Fit on features X (list of equal-length rows) and 0/1 labels y."""
        n = len(X[0])
        self.coef = [0.0] * (n + 1)
        for _ in range(self.epochs):
            for row, label in zip(X, y):
                err = label - self._sigmoid(self._score(row))
                self.coef[0] += self.lr * err
                for i, v in enumerate(row):
                    self.coef[i + 1] += self.lr * err * v

    def predict(self, X):
        """Return hard 0/1 labels; score >= 0 is exactly the p >= 0.5 boundary."""
        return [1 if self._score(row) >= 0 else 0 for row in X]

# ========= 模型②：手推梯度提升树（CART） =========
class CARTNode:
    """One node of a CART tree.

    Leaf nodes keep ``feature_idx``/``threshold``/``left``/``right`` at
    their ``None`` defaults; internal nodes fill all four in.
    """

    def __init__(self, gini, num_samples, num_classes, value,
                 feature_idx=None, threshold=None, left=None, right=None):
        self.gini = gini                  # impurity of the samples at this node
        self.num_samples = num_samples    # samples that reached this node
        self.num_classes = num_classes    # distinct target values seen here
        self.value = value                # majority target value (the prediction)
        self.feature_idx = feature_idx    # split feature index (None for a leaf)
        self.threshold = threshold        # split threshold (go left if <=)
        self.left = left
        self.right = right


class GradientBoostingSTD:
    """Hand-rolled gradient boosting on CART trees.

    Each tree is fitted to the current residual (label minus the scaled
    ensemble prediction so far).  NOTE(review): the trees split on Gini
    impurity and predict the majority *residual value*, i.e. they are
    classification trees applied to a regression target — unusual, but
    it works for 0/1 labels because residuals stay clustered.
    """

    def __init__(self, n_estimators=100, max_depth=3, learning_rate=0.1):
        self.n_estimators = n_estimators    # number of boosting stages
        self.max_depth = max_depth          # depth cap for every tree
        self.learning_rate = learning_rate  # shrinkage applied to each tree
        self.trees = []                     # fitted CARTNode roots

    def _gini(self, y):
        """Gini impurity of the label multiset y (0.0 for empty/pure)."""
        m = len(y)
        if m == 0:
            return 0.0
        counts = Counter(y)
        return 1.0 - sum((c / m) ** 2 for c in counts.values())

    def _best_split(self, X, y):
        """Exhaustive (feature, threshold) search minimizing weighted Gini.

        Returns (None, None) when no proper split improves on the parent
        impurity.  BUGFIX: degenerate splits that put every sample on one
        side are skipped — the original accepted them, which could leave a
        node with a single None child and crash prediction on unseen values.
        """
        m = len(y)
        if m < 2:
            return None, None
        best_gini = self._gini(y)   # baseline: only accept real improvements
        best_idx, best_thr = None, None
        for feature_idx in range(len(X[0])):
            column = [row[feature_idx] for row in X]
            for thr in sorted(set(column)):
                left_y = [y[i] for i, v in enumerate(column) if v <= thr]
                if not left_y or len(left_y) == m:
                    continue  # all samples on one side: useless split
                right_y = [y[i] for i, v in enumerate(column) if v > thr]
                weighted = (len(left_y) * self._gini(left_y)
                            + len(right_y) * self._gini(right_y)) / m
                if weighted < best_gini:
                    best_gini, best_idx, best_thr = weighted, feature_idx, thr
        return best_idx, best_thr

    def _build_tree(self, X, y, depth=0):
        """Recursively grow a CART tree on (X, y) up to self.max_depth."""
        if not y:
            return None  # empty partition

        class_counts = Counter(y)
        node = CARTNode(
            gini=self._gini(y),
            num_samples=len(y),
            num_classes=len(class_counts),
            value=max(class_counts, key=class_counts.get),  # majority value
        )

        # Pure nodes need no split: both children would predict the same value.
        if depth < self.max_depth and node.gini > 0.0:
            idx, thr = self._best_split(X, y)
            if idx is not None:
                left_X, left_y, right_X, right_y = [], [], [], []
                for row, label in zip(X, y):
                    if row[idx] <= thr:
                        left_X.append(row)
                        left_y.append(label)
                    else:
                        right_X.append(row)
                        right_y.append(label)
                node.feature_idx = idx
                node.threshold = thr
                node.left = self._build_tree(left_X, left_y, depth + 1)
                node.right = self._build_tree(right_X, right_y, depth + 1)

        return node

    def fit(self, X, y):
        """Boost: each tree fits what the scaled ensemble still gets wrong."""
        self.trees = []
        residual = list(y)
        for _ in range(self.n_estimators):
            tree = self._build_tree(X, residual)
            self.trees.append(tree)
            pred = [self._predict_one(tree, row) for row in X]
            # BUGFIX: subtract the *scaled* tree contribution — exactly what
            # predict() adds back.  The original subtracted the raw tree
            # output, so fit and predict disagreed and the ensemble score
            # saturated at learning_rate * y (predicting all zeros).
            residual = [r - self.learning_rate * p for r, p in zip(residual, pred)]

    def _predict_one(self, node, row):
        """Walk one sample down the tree; robust to a missing child."""
        while node.feature_idx is not None:
            child = node.left if row[node.feature_idx] <= node.threshold else node.right
            if child is None:
                break  # defensive: fall back to this node's majority value
            node = child
        return node.value

    def predict(self, X):
        """Sum the scaled tree outputs and threshold at 0.5 into hard 0/1 labels."""
        scores = [0.0] * len(X)
        for tree in self.trees:
            for i, row in enumerate(X):
                scores[i] += self.learning_rate * self._predict_one(tree, row)
        return [1 if s >= 0.5 else 0 for s in scores]

# ========= 轮次函数 =========
def round1(X, y):
    """Round 1: train and self-evaluate logistic regression on the raw base features."""
    print(">>> 轮次1：基础特征（txt_len, title_len, cate1,2,3, u30, pos_rate）")
    model = LogisticRegressionSTD(lr=0.1, epochs=10)
    model.fit(X, y)
    score = f1_score(y, model.predict(X), average='micro')
    print("轮次1 LR Micro-F1", score)
    return score

def round2(X, y):
    """Round 2: derive extra features, keep the first 10 columns, refit LR.

    Derived columns appended to the 7 base features:
      * txt/title length ratio (+1 smoothing on the denominator)
      * cate1 x cate2 cross, folded into 100 deterministic bins
      * u30 bucketed into bands of width 10, capped at band 4
    """
    print(">>> 轮次2：衍生 + 选择（txt/title 比例、类目交叉、u30 分桶）")
    # Feature derivation.
    txt_ratio = [row[0] / (row[1] + 1) for row in X]
    # BUGFIX: the original used hash(f"{cate1}_{cate2}") % 100, which is
    # salted per process (PYTHONHASHSEED) and therefore NOT reproducible
    # between runs.  A fixed arithmetic mix keeps the feature deterministic.
    cate_cross = [(int(row[2]) * 131 + int(row[3])) % 100 for row in X]
    u30_bucket = [min(int(row[5] // 10), 4) for row in X]
    X2 = [row + [txt_ratio[i], cate_cross[i], u30_bucket[i]] for i, row in enumerate(X)]
    # Selection: keep the first 10 columns.  7 base + 3 derived = exactly 10,
    # so this is currently a no-op kept as a hook for future derivations.
    X2 = [row[:10] for row in X2]
    lr = LogisticRegressionSTD(lr=0.1, epochs=10)
    lr.fit(X2, y)
    pred = lr.predict(X2)
    f1 = f1_score(y, pred, average='micro')
    print("轮次2 LR Micro-F1", f1)
    return f1

def round3(X, y):
    """Round 3: hyper-parameter tuned GBDT (more trees, deeper, smaller learning rate)."""
    print(">>> 轮次3：超参调优（GBDT n_estimators=300, lr=0.05）")
    model = GradientBoostingSTD(n_estimators=300, max_depth=4, learning_rate=0.05)
    model.fit(X, y)
    score = f1_score(y, model.predict(X), average='micro')
    print("轮次3 GBDT Micro-F1", score)
    return score

# ========= 主流程 =========
def main():
    """Sample up to MAX_ROWS rows from the TSV training files, run the three
    optimization rounds, and persist the (round, F1) pairs with joblib.
    """
    files = glob.glob(r'E:\pythonProject\Xmkb_qg\train_data\*')
    print('>>> 文件列表', files)
    X, y = [], []
    count = 0
    for path in files:
        # BUGFIX: the original passed an open() result straight to csv.reader
        # and never closed it; a context manager closes each file handle
        # deterministically.
        with open(path, newline='') as fh:
            for row in csv.reader(fh, delimiter='\t'):
                if len(row) != 8:       # skip malformed lines
                    continue
                if count >= MAX_ROWS:
                    break
                # Random downsampling: keep roughly 0.1% of the rows.
                if random.random() < 0.001:
                    X.append([float(v) for v in row[1:]])  # 7 numeric features
                    y.append(int(row[0]))                  # label in column 0
                    count += 1
        if count >= MAX_ROWS:
            break
    print('>>> 实际样本数', len(X))

    # Round 1: base features.
    f1_r1 = round1(X, y)
    # Round 2: derived features + selection.
    f1_r2 = round2(X, y)
    # Round 3: hyper-parameter tuning.
    f1_r3 = round3(X, y)

    results = [(f"R{i + 1}", f1) for i, f1 in enumerate([f1_r1, f1_r2, f1_r3])]
    print(">>> 3 轮优化完成，结果：", results)
    joblib.dump(results, 'optimize_200_results.pkl')

if __name__ == '__main__':
    main()


# import glob
# import pandas as pd
# import joblib
# import torch
# from torch.utils.data import DataLoader, TensorDataset
# from sklearn.metrics import f1_score
# from transformers import AutoTokenizer, AutoModelForSequenceClassification
#
# # ---------- 1. 加载数据 ----------
# files = glob.glob(r'E:\pythonProject\Xmkb_qg\train_data\*')
# df = pd.concat(
#     [pd.read_csv(f, sep='\t',
#                  names=['y', 'comment_content', 'txt_len', 'title_len',
#                         'cate1', 'cate2', 'cate3', 'u30', 'pos_rate'])
#      for f in files]
# )
# print('>>> 文件列表', files)
#
# # ---------- 2. 特征衍生（仅 LR/GBDT 用，BERT 不用） ----------
# df['txt_title_ratio'] = df['txt_len'] / (df['title_len'] + 1)
# df['cate_cross'] = df['cate1'].astype(str) + '_' + df['cate2'].astype(str)
# df['u30_bucket'] = pd.cut(df['u30'], bins=5, labels=False)
#
# # ---------- 3. BERT 预测函数 ----------
# tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
# model = AutoModelForSequenceClassification.from_pretrained(
#     "bert-base-chinese", num_labels=5
# )
# model.eval()  # 推理模式
#
#
# def bert_predict(texts, max_len=128, batch_size=32):
#     """
#     texts: list[str]
#     return: list[int]  预测标签
#     """
#     # 如果 tokenizer 没有 pad_token，手动指定
#     if tokenizer.pad_token is None:
#         tokenizer.pad_token = tokenizer.eos_token
#
#     enc = tokenizer(
#         texts,
#         padding=True,
#         truncation=True,
#         max_length=max_len,
#         return_tensors="pt",
#     )
#     dataset = TensorDataset(enc["input_ids"], enc["attention_mask"])
#     loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
#
#     preds = []
#     with torch.no_grad():
#         for input_ids, attn_mask in loader:
#             logits = model(input_ids=input_ids, attention_mask=attn_mask).logits
#             preds += torch.argmax(logits, dim=1).tolist()
#     return preds
#
#
# # ---------- 4. 运行 BERT 预测 ----------
# pred_bert = bert_predict(df['comment_content'].astype(str).tolist())
# print("BERT Micro-F1", f1_score(df['y'], pred_bert, average='micro'))