import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

# --- 1. Data loading and filtering ---
print("--- 1. 数据加载与筛选 ---")
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['species'] = iris.target_names[iris.target]

# Keep only the two classes used for the binary task: Setosa (0) and Versicolor (1).
df_binary = df[df['target'].isin([0, 1])].copy()

# QBoost expects labels in {-1, +1}: class 0 -> -1, class 1 -> +1.
df_binary['target_qboost'] = df_binary['target'].map({0: -1, 1: 1})

# Feature matrix plus both label encodings.
X = df_binary[iris.feature_names]
y = df_binary['target_qboost']
y_original = df_binary['target']  # original 0/1 labels, kept for visualization and LDA

N_samples, N_features = X.shape
print(f"筛选后样本数量: {N_samples}")
print(f"特征数量: {N_features}\n")

# --- 2. Statistical feature analysis ---
print("--- 2. 统计学特征分析 ---")
overall_stats = X.describe()
print("整体描述性统计:\n", overall_stats)
print("\n按类别分组描述性统计 (Setosa vs. Versicolor):")
per_class_stats = df_binary.groupby('species')[iris.feature_names].describe()
print(per_class_stats)

# --- 3. Visualization ---
print("\n--- 3. 数据可视化 ---")

# Box plots: one subplot per feature, grouped by species.
plt.figure(figsize=(15, 6))
for i, feature in enumerate(iris.feature_names):
    plt.subplot(1, N_features, i + 1)
    sns.boxplot(x='species', y=feature, data=df_binary)
    plt.title(f'Box Plot of {feature} by Species')
plt.tight_layout()
plt.savefig('boxplot_features.png')
plt.show()

# Per-feature histograms with KDE overlays, colored by species.
plt.figure(figsize=(15, 6))
for i, feature in enumerate(iris.feature_names):
    plt.subplot(1, N_features, i + 1)
    sns.histplot(data=df_binary, x=feature, hue='species', kde=True, palette='viridis')
    plt.title(f'Distribution of {feature} by Species')
plt.tight_layout()
plt.savefig('kde_hist_features.png')
plt.show()

# Scatter-plot matrix over all feature pairs.
sns.pairplot(df_binary, hue='species', vars=iris.feature_names, palette='viridis')
plt.suptitle('Pair Plot of Iris Features by Species', y=1.02)  # y > 1 lifts the title above the grid
plt.savefig('pairplot_features.png')
plt.show()

# --- 4. Standardization (zero mean, unit variance per feature) ---
print("\n--- 4. 数据标准化 ---")
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Wrap the scaled array back into a DataFrame purely for readable printing.
X_scaled_df = pd.DataFrame(X_scaled, columns=iris.feature_names)
print("标准化后数据 (前5行):\n", X_scaled_df.head(5))

# --- 5. Dimensionality reduction (LDA) ---
print("\n--- 5. 数据降维 (LDA) ---")
# LDA is supervised and needs the integer class labels, so the original 0/1
# targets are used here rather than the -1/+1 QBoost encoding.
# For a two-class problem LDA can project to at most 1 dimension.
lda = LinearDiscriminantAnalysis(n_components=1)
X_lda = lda.fit_transform(X_scaled, y_original) # note: y_original (0/1), not y

print(f"降维后数据维度: {X_lda.shape}")
print("降维后数据 (前5行):\n", X_lda[:5])

# A histogram/KDE is the natural way to visualize a 1-D projection.
plt.figure(figsize=(8, 6))
# Wrap the 1-D projection in a DataFrame so seaborn can split it by species.
df_lda = pd.DataFrame({'LDA Component 1': X_lda[:, 0], 'species': y_original.map({0: 'setosa', 1: 'versicolor'})})
sns.histplot(data=df_lda, x='LDA Component 1', hue='species', kde=True, palette='viridis', stat='density', common_norm=False)
plt.title('1D LDA Projection of Iris Dataset (Setosa vs. Versicolor)')
plt.xlabel('LDA Component 1')
plt.ylabel('Density')
plt.grid(True)
plt.savefig('lda_1d_projection.png') # saved under a distinct file name
plt.show()

# --- Train/test split ---
print("\n--- 6. 数据集划分 ---")
# Split the standardized features; stratify keeps the class ratio equal in both halves.
X_train_scaled, X_test_scaled, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.3, random_state=42, stratify=y
)

print(f"训练集特征形状: {X_train_scaled.shape}, 训练集标签形状: {y_train.shape}")
print(f"测试集特征形状: {X_test_scaled.shape}, 测试集标签形状: {y_test.shape}")
print(f"训练集标签分布:\n{y_train.value_counts()}")
print(f"测试集标签分布:\n{y_test.value_counts()}")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import random

# --- (Continues from the previous step; assumes X_train_scaled, y_train,
#      y_test, N_features, iris, etc. are already defined) ---

# --- 5.1.2 Weak classifier construction & 5.1.3 model training ---
print("\n--- 5.1.2 弱分类器构建与 5.1.3 模型训练 ---")

# Number of weak classifiers in the candidate pool.
M = 50
weak_classifiers_info = {}  # per-classifier model and metadata, keyed by index
feature_names = iris.feature_names

print(f"将构建 {M} 个弱分类器...")

for j in range(M):
    # Pick a single random feature for this stump
    # (random.randrange is the idiomatic one-draw form of random.sample(..., 1)[0]).
    selected_feature_index = random.randrange(N_features)
    selected_feature_name = feature_names[selected_feature_index]

    # A depth-1 decision tree (stump) keeps each learner deliberately weak.
    weak_clf = DecisionTreeClassifier(max_depth=1, random_state=j)  # random_state for reproducibility

    # Train on the single selected column; X_train_scaled is a numpy array,
    # so the column is selected by index and reshaped to (n, 1).
    X_train_single_feature = X_train_scaled[:, selected_feature_index].reshape(-1, 1)

    weak_clf.fit(X_train_single_feature, y_train)

    # y_train is already encoded as -1/+1, so the stump predicts -1/+1 directly;
    # no 0/1 -> -1/+1 remapping is required.
    h_j_train_preds = weak_clf.predict(X_train_single_feature)

    # Training-set accuracy of this stump.
    accuracy = accuracy_score(y_train, h_j_train_preds)

    weak_classifiers_info[j] = {
        'model': weak_clf,
        'feature_index': selected_feature_index,
        'feature_name': selected_feature_name,
        'train_predictions': h_j_train_preds,  # predictions on the training set
        'train_accuracy': accuracy
    }

    print(f"弱分类器 {j+1}/{M} (特征: {selected_feature_name}): 训练准确率 = {accuracy:.4f}")

# Summarize weak-classifier performance.
accuracies = [info['train_accuracy'] for info in weak_classifiers_info.values()]
print(f"\n所有弱分类器的平均训练准确率: {np.mean(accuracies):.4f}")
print(f"最高训练准确率: {np.max(accuracies):.4f}")
print(f"最低训练准确率: {np.min(accuracies):.4f}")

# Distribution of the weak classifiers' training accuracies.
plt.figure(figsize=(10, 6))
sns.histplot(accuracies, kde=True, bins=10)
plt.title('Distribution of Weak Classifier Training Accuracies')
plt.xlabel('Training Accuracy')
plt.ylabel('Frequency')
plt.grid(True)
plt.savefig('weak_classifier_accuracies_distribution.png')
plt.show()

# Example: inspect one weak classifier in detail.
# print(weak_classifiers_info[0])

# Stack every stump's training predictions column-wise for the QUBO build.
# Shape: (N_train_samples, M).
all_h_j_train_preds = np.column_stack(
    [info['train_predictions'] for info in weak_classifiers_info.values()]
)
print(f"\n所有弱分类器在训练集上的预测结果矩阵形状: {all_h_j_train_preds.shape}") # expected (70, M)
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
import random
import matplotlib.pyplot as plt
import seaborn as sns

# --- (Assumes everything above has run, so these variables would already exist:
#      X_train_scaled, y_train, all_h_j_train_preds, M, N_features, iris.feature_names) ---
# N_samples_train = X_train_scaled.shape[0] # N in the formula
# M = all_h_j_train_preds.shape[1] # M in the formula

# Rebuild the state so this section also runs standalone (when run separately,
# redefine these or pass them in as parameters).
# NOTE(review): `random` is never seeded here, so the per-stump feature choices —
# and thus weak_classifiers_info — need not match the earlier section's run.
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df_binary = df[df['target'].isin([0, 1])].copy()
df_binary['target_qboost'] = df_binary['target'].map({0: -1, 1: 1})
X = df_binary[iris.feature_names]
y = df_binary['target_qboost']
y_original = df_binary['target']

scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
X_train_scaled, X_test_scaled, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.3, random_state=42, stratify=y
)

N_train_samples = X_train_scaled.shape[0]
N_features = X_train_scaled.shape[1]

M = 50 # number of weak classifiers
weak_classifiers_info = {}
feature_names = iris.feature_names

for j in range(M):
    selected_feature_index = random.sample(range(N_features), 1)[0]
    weak_clf = DecisionTreeClassifier(max_depth=1, random_state=j)
    X_train_single_feature = X_train_scaled[:, selected_feature_index].reshape(-1, 1)
    weak_clf.fit(X_train_single_feature, y_train)
    h_j_train_preds_original = weak_clf.predict(X_train_single_feature)
    # y_train is -1/+1, so predict() already yields -1/+1; this remap is a no-op kept for safety.
    h_j_train_preds = np.where(h_j_train_preds_original == 1, 1, -1)
    accuracy = accuracy_score(y_train, h_j_train_preds)
    weak_classifiers_info[j] = {
        'model': weak_clf,
        'feature_index': selected_feature_index,
        'feature_name': feature_names[selected_feature_index],
        'train_predictions': h_j_train_preds,
        'train_accuracy': accuracy
    }
# Prediction matrix of all stumps on the training set, shape (N_train_samples, M).
all_h_j_train_preds = np.array([info['train_predictions'] for info in weak_classifiers_info.values()]).T


# --- 5.2.1 问题二模型建立 (QUBO转化) ---
print("\n--- 5.2.1 问题二模型建立 (QUBO转化) ---")

# 定义参数 A 和 B
A = 1.0  # 分类误差项的权重
B = 0.5  # L1正则化项的权重

# 初始化 QUBO 矩阵 Q
Q = np.zeros((M, M))

# 遍历所有弱分类器对，构建 Q 矩阵
for j in range(M):
    for k in range(M):
        if j == k:
            # 计算对角线元素 Q_jj
            # Q_jj = A * N - 2 * A * sum(y_i * h_j(x_i)) + B
            sum_yh = np.sum(y_train * all_h_j_train_preds[:, j])
            Q[j, j] = A * N_train_samples - 2 * A * sum_yh + B
        else:
            # 计算非对角线元素 Q_jk
            # Q_jk = A * sum(h_j(x_i) * h_k(x_i))
            sum_hk_hj = np.sum(all_h_j_train_preds[:, j] * all_h_j_train_preds[:, k])
            Q[j, k] = A * sum_hk_hj

print("QUBO 矩阵 Q 构建完成。形状:", Q.shape)
# 打印 Q 矩阵的部分内容，以验证
print("QUBO 矩阵 Q (部分):\n", Q[:5, :5]) # 打印左上角5x5部分

# --- 5.2.2 Problem-2 model solving (QUBO solve - simulated SDK) ---
print("\n--- 5.2.2 问题二模型求解 (QUBO求解 - 模拟SDK) ---")

# Stand-in for the Kaiwu SDK annealing interface.
# Replace with the real Kaiwu SDK import and calls in production.
class MockAnnealingSolver:
    """Mock annealer: returns a reproducible random binary selection vector.

    It does NOT optimize the QUBO; it only demonstrates the solve/return flow.
    """

    def solve(self, Q_matrix, num_reads=100):
        """Pretend to anneal the QUBO defined by Q_matrix.

        Args:
            Q_matrix (np.ndarray): square (M, M) QUBO matrix.
            num_reads (int): nominal number of anneal reads (unused by the mock).

        Returns:
            np.ndarray: binary vector of length M; 1 marks a selected classifier.
        """
        print(f"模拟Kaiwu SDK模拟退火求解器，求解QUBO矩阵，num_reads={num_reads}...")
        # Derive the problem size from the matrix itself instead of reading the
        # module-level M (the original depended on that global, which breaks
        # reuse on any other problem size).
        m = Q_matrix.shape[0]
        np.random.seed(42)  # keep the mock's output reproducible
        # Select roughly between m/5 and m/2 weak classifiers at random.
        num_selected = np.random.randint(m // 5, m // 2 + 1)
        w_solution = np.zeros(m, dtype=int)
        selected_indices = np.random.choice(m, num_selected, replace=False)
        w_solution[selected_indices] = 1

        print("模拟求解完成。")
        return w_solution

# Instantiate the mock solver.
solver = MockAnnealingSolver()

# Solve the QUBO.
# num_reads: number of anneal iterations / samples requested.
w_optimal = solver.solve(Q, num_reads=100)

print("Kaiwu SDK 模拟求解器返回的最优弱分类器权重 w_optimal:")
print(w_optimal)
print(f"选中的弱分类器数量 (w=1): {np.sum(w_optimal)}")

# Indices of the selected (w_j == 1) weak classifiers.
selected_weak_classifier_indices = np.where(w_optimal == 1)[0]
print(f"被选中的弱分类器索引: {selected_weak_classifier_indices}")

# Inspect the selected weak classifiers.
print("\n--- 被选中的弱分类器详细信息 ---")
if len(selected_weak_classifier_indices) > 0:
    for idx in selected_weak_classifier_indices:
        info = weak_classifiers_info[idx]
        print(f"  分类器 {idx+1}: 特征 '{info['feature_name']}', 训练准确率 {info['train_accuracy']:.4f}")
else:
    print("没有弱分类器被选中。可能参数B设置过高，导致正则化惩罚过大。")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import (
    accuracy_score, precision_score, recall_score, f1_score,
    roc_curve, auc, confusion_matrix, ConfusionMatrixDisplay
)
import random

# --- (Assumes everything defined earlier — X_train_scaled, y_train, X_test_scaled,
#      y_test, weak_classifiers_info, w_optimal, feature_names, ... — is available) ---

# The initialization is repeated here so this section runs standalone;
# remove the duplication when the previous sections already ran in this session.
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df_binary = df[df['target'].isin([0, 1])].copy()
df_binary['target_qboost'] = df_binary['target'].map({0: -1, 1: 1})
X = df_binary[iris.feature_names]
y = df_binary['target_qboost']
y_original = df_binary['target']

scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
X_train_scaled, X_test_scaled, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.3, random_state=42, stratify=y
)

N_train_samples = X_train_scaled.shape[0]
N_features = X_train_scaled.shape[1]

M = 50 # number of weak classifiers
weak_classifiers_info = {}
feature_names = iris.feature_names

# NOTE(review): `random` is unseeded, so these stumps' feature choices need not
# reproduce the run that produced the hard-coded w_optimal below — the pairing
# of w_optimal with this pool is illustrative only; confirm before relying on it.
for j in range(M):
    selected_feature_index = random.sample(range(N_features), 1)[0]
    weak_clf = DecisionTreeClassifier(max_depth=1, random_state=j)
    X_train_single_feature = X_train_scaled[:, selected_feature_index].reshape(-1, 1)
    weak_clf.fit(X_train_single_feature, y_train)
    h_j_train_preds_original = weak_clf.predict(X_train_single_feature)
    h_j_train_preds = np.where(h_j_train_preds_original == 1, 1, -1)
    weak_classifiers_info[j] = {
        'model': weak_clf,
        'feature_index': selected_feature_index,
        'feature_name': feature_names[selected_feature_index],
        'train_predictions': h_j_train_preds,
        'train_accuracy': accuracy_score(y_train, h_j_train_preds)
    }
all_h_j_train_preds = np.array([info['train_predictions'] for info in weak_classifiers_info.values()]).T

# Mock w_optimal taken from the previous step's output (replace with the actual
# solver result when running the sections sequentially).
w_optimal = np.array([0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1])


# --- 5.3.1 Model evaluation metrics ---
print("\n--- 5.3.1 模型评价指标 ---")
# 定义强分类器函数
def strong_classifier_predict(X_data, w_opt, weak_clfs_info, feature_names_list):
    """
    根据最优权重w_opt和弱分类器信息，计算强分类器的预测结果。
    
    Args:
        X_data (np.array): 待预测的特征数据 (scaled).
        w_opt (np.array): 最优的二进制权重向量.
        weak_clfs_info (dict): 包含所有弱分类器模型及其信息的字典.
        feature_names_list (list): 原始特征名称列表。
        
    Returns:
        np.array: 强分类器的预测结果 (1或-1).
        np.array: 强分类器的决策分数 (用于AUC和ROC曲线).
    """
    N_samples_data = X_data.shape[0]
    
    # 初始化累积预测分数
    # scores = np.zeros(N_samples_data) # 直接加权求和，然后取sign
    
    # 存储每个选中弱分类器的预测，形状 (N_samples_data, num_selected_classifiers)
    selected_preds_list = []

    for j, w_j in enumerate(w_opt):
        if w_j == 1: # 如果弱分类器被选中
            clf_info = weak_clfs_info[j]
            weak_clf_model = clf_info['model']
            selected_feature_index = clf_info['feature_index']
            
            # 提取弱分类器训练时使用的特定特征进行预测
            X_data_single_feature = X_data[:, selected_feature_index].reshape(-1, 1)
            
            # 获取预测结果 (DecisionTreeClassifier.predict 默认返回 0/1)
            # DecisionTreeClassifier.predict_proba 返回 [proba_class_0, proba_class_1]
            # 为了获取原始的h_j(x_i)值 (-1或1)，直接用predict然后映射
            h_j_preds_original = weak_clf_model.predict(X_data_single_feature)
            h_j_preds = np.where(h_j_preds_original == 1, 1, -1) # 映射 1->1, 0->-1
            
            selected_preds_list.append(h_j_preds)
    
    if not selected_preds_list: # 如果没有弱分类器被选中
        print("警告: 没有弱分类器被选中。强分类器将返回全0预测。")
        decision_scores = np.zeros(N_samples_data)
    else:
        # 将所有选中弱分类器的预测结果按列堆叠
        all_selected_preds = np.array(selected_preds_list).T # 形状 (N_samples, num_selected_classifiers)
        
        # 加权求和 (w_j 已经是1或0，所以直接求和即可)
        # 这里的 w_j 就是 1，因为我们只考虑了 w_j = 1 的分类器
        decision_scores = np.sum(all_selected_preds, axis=1)

    # 最终预测结果
    predictions = np.sign(decision_scores)
    # 对于 decision_scores == 0 的情况，np.sign(0) 返回 0，需要根据惯例处理，
    # 例如，将其归为某一类，或者表示无法确定。对于二分类，通常设置为1或-1。
    # 这里我们保持np.sign的原始行为，即0映射为0，如果需要，可以进一步处理
    predictions[predictions == 0] = 1 # 如果决策分数是0，默认归为正类（或-1，取决于业务）
    
    return predictions.astype(int), decision_scores


# Evaluate on the training set.
y_train_pred, y_train_scores = strong_classifier_predict(X_train_scaled, w_optimal, weak_classifiers_info, feature_names)

print("\n--- 训练集评估 ---")
train_accuracy = accuracy_score(y_train, y_train_pred)
train_precision = precision_score(y_train, y_train_pred, pos_label=1) # positive class = Versicolor (+1)
train_recall = recall_score(y_train, y_train_pred, pos_label=1)
train_f1 = f1_score(y_train, y_train_pred, pos_label=1)

print(f"训练集准确率: {train_accuracy:.4f}")
print(f"训练集精确率: {train_precision:.4f}")
print(f"训练集召回率: {train_recall:.4f}")
print(f"训练集F1分数: {train_f1:.4f}")

# Training-set confusion matrix.
cm_train = confusion_matrix(y_train, y_train_pred, labels=[-1, 1])
disp_train = ConfusionMatrixDisplay(confusion_matrix=cm_train, display_labels=['Setosa (-1)', 'Versicolor (1)'])
# Draw into an explicit axes: ConfusionMatrixDisplay.plot() without `ax` creates
# its own figure, so the previous bare plt.figure() only left a stray empty window.
fig_cm_train, ax_cm_train = plt.subplots(figsize=(6, 6))
disp_train.plot(ax=ax_cm_train, cmap=plt.cm.Blues, values_format='d')
ax_cm_train.set_title('Confusion Matrix - Training Set')
plt.savefig('confusion_matrix_train.png')
plt.show()

# Training-set ROC curve and AUC.
# roc_curve expects y_true in {0, 1}, so map the -1/+1 labels back to 0/1.
y_train_original_labels = np.where(y_train == 1, 1, 0) # 1 -> 1, -1 -> 0
# y_score may be any monotone decision score: larger values favor the positive
# class (+1), smaller favor the negative (-1), so the raw vote sums from
# strong_classifier_predict are used directly instead of probabilities.
fpr_train, tpr_train, _ = roc_curve(y_train_original_labels, y_train_scores)
roc_auc_train = auc(fpr_train, tpr_train)

plt.figure(figsize=(8, 6))
plt.plot(fpr_train, tpr_train, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc_train:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic - Training Set')
plt.legend(loc="lower right")
plt.grid(True)
plt.savefig('roc_curve_train.png')
plt.show()


# Evaluate on the held-out test set.
y_test_pred, y_test_scores = strong_classifier_predict(X_test_scaled, w_optimal, weak_classifiers_info, feature_names)

print("\n--- 测试集评估 ---")
test_accuracy = accuracy_score(y_test, y_test_pred)
test_precision = precision_score(y_test, y_test_pred, pos_label=1)  # positive class = Versicolor (+1)
test_recall = recall_score(y_test, y_test_pred, pos_label=1)
test_f1 = f1_score(y_test, y_test_pred, pos_label=1)

print(f"测试集准确率: {test_accuracy:.4f}")
print(f"测试集精确率: {test_precision:.4f}")
print(f"测试集召回率: {test_recall:.4f}")
print(f"测试集F1分数: {test_f1:.4f}")

# Test-set confusion matrix.
cm_test = confusion_matrix(y_test, y_test_pred, labels=[-1, 1])
disp_test = ConfusionMatrixDisplay(confusion_matrix=cm_test, display_labels=['Setosa (-1)', 'Versicolor (1)'])
# Pass an explicit axes: ConfusionMatrixDisplay.plot() without `ax` opens its
# own figure, so the previous bare plt.figure() only left a stray empty window.
fig_cm_test, ax_cm_test = plt.subplots(figsize=(6, 6))
disp_test.plot(ax=ax_cm_test, cmap=plt.cm.Blues, values_format='d')
ax_cm_test.set_title('Confusion Matrix - Test Set')
plt.savefig('confusion_matrix_test.png')
plt.show()

# Test-set ROC curve and AUC (same label remap and score convention as above).
y_test_original_labels = np.where(y_test == 1, 1, 0) # 1 -> 1, -1 -> 0
fpr_test, tpr_test, _ = roc_curve(y_test_original_labels, y_test_scores)
roc_auc_test = auc(fpr_test, tpr_test)

plt.figure(figsize=(8, 6))
plt.plot(fpr_test, tpr_test, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc_test:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic - Test Set')
plt.legend(loc="lower right")
plt.grid(True)
plt.savefig('roc_curve_test.png')
plt.show()