# coding:utf-8

import time
import numpy as np
import sklearn.svm as svm
import joblib
from dataset import get_satellite, get_data32
import pickle
from sklearn.model_selection import train_test_split,cross_val_score
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from plot import _append_to_csv
from plot import _plot_from_csv

class TSVM(object):
    """Transductive SVM (semi-supervised SVM) built on sklearn's SVC.

    Usage: call ``initial()`` (or ``load()``) first, then ``train()`` with a
    labeled set and an unlabeled set; afterwards use ``predict`` / ``score``
    and optionally ``save``.
    """

    def __init__(self):
        # Intentionally empty: hyper-parameters are set later via initial()
        # so that load() can also be used to restore a trained model.
        pass

    def initial(self, kernel='linear',conf_mode='hard',tau=1.0):
        '''
        Initialize TSVM hyper-parameters and the underlying SVC.
        Parameters
        ----------
        kernel: kernel of svm (passed straight to sklearn.svm.SVC)
        conf_mode: 'hard' (margin-threshold filtering of pseudo-labels),
                   'soft' (margin-scaled sample weights), or any other
                   value for the plain unweighted-selection baseline
        tau: confidence threshold ('hard') / temperature ('soft')
        '''
        # Cl: penalty on labeled samples; Cu: initial (small) penalty on
        # unlabeled samples, annealed upward during train().
        self.Cl, self.Cu = 1.5, 0.001
        self.kernel = kernel
        self.conf_mode = conf_mode
        self.tau = tau
        self.clf = svm.SVC(C=1.5, kernel=self.kernel)

    def load(self, model_path='./TSVM.model'):
        '''
        Load TSVM from model_path
        Parameters
        ----------
        model_path: model path of TSVM
                        model should be an sklearn svm saved with joblib.dump
        '''
        self.clf = joblib.load(model_path)

    def train(self, X1, Y1, X2):
        """
        Train the transductive SVM.

        Parameters
        ----------
        X1, Y1 : labeled samples, shapes (n_l, d) and (n_l,)
        X2     : unlabeled samples, shape (n_u, d)

        The trained classifier is stored in ``self.clf``.
        -----------
        Requires these attributes to be set beforehand via self.initial():
            self.Cl          penalty weight for labeled samples
            self.Cu          initial penalty weight for unlabeled samples
            self.conf_mode   'hard' or 'soft' (else: unweighted baseline)
            self.tau         confidence threshold / temperature

        NOTE(review): the label-swap step below assumes labels are encoded
        as -1/+1 (it uses ``Y2 > 0`` / ``Y2 < 0`` and ``Y2 * f(x)``) —
        confirm the dataset provides +/-1 labels, not 0/1.
        """
        # Step 1: fit on the labeled set only, then produce the initial
        # pseudo-labels for the unlabeled set.
        self.clf.fit(X1, Y1)
        Y2 = self.clf.predict(X2)  # shape (n_u,)

        # Step 2: outer loop — anneal Cu upward until it reaches Cl.
        while self.Cu < self.Cl:
            print("Cu:",self.Cu,"Cl:",self.Cl)
            # 2-a: distance to the decision boundary, used as confidence.
            # Computed with the classifier from the previous round.
            margin = np.abs(self.clf.decision_function(X2))  # |f(x)|

            # 2-b: build X3, Y3, sample_weight according to conf_mode.
            if self.conf_mode == 'hard':
                keep_mask = margin >= self.tau  # True/False
                X2_used = X2[keep_mask]
                Y2_used = Y2[keep_mask]

                X3 = np.vstack([X1, X2_used])  # stacked features
                Y3 = np.concatenate([Y1, Y2_used])  # stacked labels (1-D)

                sample_weight = np.concatenate([
                    np.full(len(X1), self.Cl),  # labeled-sample weights
                    np.full(len(X2_used), self.Cu)  # unlabeled-sample weights
                ])
            elif self.conf_mode == 'soft':  # soft
                g = np.minimum(margin / self.tau, 1.0)  # weight scaling in (0, 1]
                X3 = np.vstack([X1, X2])
                Y3 = np.concatenate([Y1, Y2])

                sample_weight = np.concatenate([
                    np.full(len(X1), self.Cl),
                    self.Cu * g     # confidence-weighted unlabeled penalties
                ])
            else:
                # Baseline: use every pseudo-labeled sample, uniform Cu weight.
                X3 = np.vstack([X1, X2])
                Y3 = np.concatenate([Y1, Y2])
                sample_weight = np.concatenate([
                    np.full(len(X1), self.Cl),  # labeled-sample weights
                    np.full(len(X2), self.Cu)  # unlabeled-sample weights
                ])


            # Guard against "inconsistent numbers of samples" errors in fit().
            assert len(X3) == len(Y3) == len(sample_weight)

            # 2-c: re-train the weighted SVM.
            self.clf.fit(X3, Y3, sample_weight=sample_weight)

            # 2-d: inner label-swap loop — flip the positive and negative
            # pseudo-labels with the largest slack (Joachims-style criterion).
            # NOTE: the classifier is NOT refit between swaps, so Y2_d is the
            # same on every pass; only epsilon changes because Y2 changes.
            while True:
                Y2_d = self.clf.decision_function(X2)  # f(x), shape (n_u,)
                epsilon = 1 - Y2 * Y2_d  # functional-margin slack
                pos_mask, neg_mask = Y2 > 0, Y2 < 0  # positive/negative pseudo-labels
                if not pos_mask.any() or not neg_mask.any():
                    break  # no swappable pair exists

                pos_idx = np.argmax(epsilon[pos_mask])  # largest slack among positives
                neg_idx = np.argmax(epsilon[neg_mask])  # largest slack among negatives
                p_id = np.flatnonzero(pos_mask)[pos_idx]
                n_id = np.flatnonzero(neg_mask)[neg_idx]

                if epsilon[p_id] > 0 and epsilon[n_id] > 0 \
                        and epsilon[p_id] + epsilon[n_id] > 2.0:  # swap condition
                    Y2[p_id] *= -1
                    Y2[n_id] *= -1
                else:
                    break

            # 2-e: anneal — double Cu (capped at Cl); optionally decay tau.
            self.Cu = min(2 * self.Cu, self.Cl)
            # Shrinking tau keeps more samples in 'hard' mode / sharpens the
            # soft weights over time; floored at 0.05.
            self.tau = max(self.tau * 0.9, 0.05)

    def score(self, X, Y):
        '''
        Calculate accuracy of TSVM by X, Y
        Parameters
        ----------
        X: Input data
                np.array, shape:[n, m], n: numbers of samples, m: numbers of features
        Y: labels of X
                np.array, shape:[n, ], n: numbers of samples
        Returns
        -------
        Accuracy of TSVM
                float
        '''
        return self.clf.score(X, Y)

    def predict(self, X):
        '''
        Feed X and predict Y by TSVM
        Parameters
        ----------
        X: Input data
                np.array, shape:[n, m], n: numbers of samples, m: numbers of features
        Returns
        -------
        labels of X
                np.array, shape:[n, ], n: numbers of samples
        '''
        return self.clf.predict(X)

    def save(self, path='./TSVM.model'):
        '''
        Save TSVM to model_path
        Parameters
        ----------
        path: model path of TSVM
                        model should be svm in sklearn
        '''
        joblib.dump(self.clf, path)

def sweep_and_plot(taus,
                   strategies=('default', 'hard', 'soft'),
                   csv_acc='results_acc.csv',
                   csv_f1='results_f1.csv',
                   csv_time='results_time.csv'):
    """Run a strategies x taus grid of TSVM experiments and plot the results.

    For every (tau, strategy) pair: train a fresh TSVM, record accuracy,
    binary F1 and wall-clock training time, append the three metric tables
    to their CSV files, and render one line plot per metric.
    """
    # One result list per strategy, filled in tau order.
    acc_dict = {name: [] for name in strategies}
    f1_dict = {name: [] for name in strategies}
    time_dict = {name: [] for name in strategies}

    # Load the dataset once and reuse it for every run.
    from dataset import get_ionosphere
    Lx, Ly, Ux, Uy = get_ionosphere(n_labeled=50)

    for tau in taus:
        for strategy in strategies:
            model = TSVM()
            # 'default' maps to the unweighted baseline branch in train().
            model.initial(kernel='linear',
                          conf_mode='none' if strategy == 'default' else strategy,
                          tau=tau)

            t0 = time.perf_counter()
            model.train(Lx, Ly, Ux)
            train_sec = time.perf_counter() - t0

            # Transductive evaluation: score on the unlabeled pool itself.
            predictions = model.predict(Ux)
            acc = accuracy_score(Uy, predictions)
            _, _, f1, _ = precision_recall_fscore_support(
                Uy, predictions, average='binary', pos_label=1)

            acc_dict[strategy].append(acc)
            f1_dict[strategy].append(f1)
            time_dict[strategy].append(train_sec)

            print(f'[τ={tau:.2f}, {strategy:<7}]  '
                  f'acc={acc:.4f}  f1={f1:.4f}  time={train_sec:.3f}s')

    # Append the accumulated tables to their CSV files.
    for table, path in ((acc_dict, csv_acc),
                        (f1_dict, csv_f1),
                        (time_dict, csv_time)):
        _append_to_csv(table, taus, csv_path=path)

    # Render one figure per metric.
    _plot_from_csv(csv_path=csv_acc,  out_png='acc_vs_tau.png',y_label='acc')
    _plot_from_csv(csv_path=csv_f1,   out_png='f1_vs_tau.png',y_label='f')
    _plot_from_csv(csv_path=csv_time, out_png='time_vs_tau.png',y_label='time')

    print('✅  三张图已更新：acc_vs_tau.png, f1_vs_tau.png, time_vs_tau.png')

if __name__ == '__main__':
    # Sweep the confidence threshold/temperature over a fixed grid.
    tau_grid = [0.1, 0.4, 0.6, 0.8, 1.0]
    sweep_and_plot(tau_grid)