%%
% Decision-tree section for questions 2 and 3.
rng(2); % seed the RNG so the hold-out partition below is reproducible
%%
% Load the chemical-composition data (samples in rows, columns H:T of the sheet).
dataMatrix = readmatrix('副本附件(1)(2).xlsx', 'Range','H2:T58');

% Class labels for the rows of dataMatrix
% (11/12 and 21/22 appear to encode two main types with two subtypes each -- TODO confirm encoding).
classLabels = [12;12;12;12;12;12;12;11;11;11;11;12;12;12;12;12;11;11;21;21;21;21;21;22;22;21;21;22;22;22;22;22;21;22;21;22;21;21;21;21;21;21;22;22;22;21;21;21;21;21;21;21;21;22;21;21;21]; % 对应于 dataMatrix 中的样本
% Build the hand-rolled decision tree from the full data set
depthLimit = 3; % maximum tree depth
decisionTree = buildTree(dataMatrix, classLabels, depthLimit);

% Hold out 30% of the samples as a test set
cvs = cvpartition(size(classLabels, 1), 'HoldOut', 0.3);
XTrain = dataMatrix(cvs.training, :);
YTrain = classLabels(cvs.training);
XTest = dataMatrix(cvs.test, :);
YTest = classLabels(cvs.test);
rng(0);
% Train a decision tree with the Statistics Toolbox for comparison
ct = fitctree(XTrain, YTrain);
% Predict on the held-out samples
predictions = predict(ct, XTest);
% Visualize the fitted tree
view(ct,'Mode','graph');
%%
% Classify the unknown-type glass artifacts (sheet 表单3) with the fitted tree.
unknownSamples=readmatrix("副本附件(1)(2).xlsx","Sheet","表单3","Range",'D2:P9');
predictedTypes = predict(ct, unknownSamples);

% Sensitivity analysis: perturb the PbO content of the first unknown sample.
% BUG FIX: the original used find(strcmp(unknownSamples(1,:), '氧化铅(PbO)')),
% but unknownSamples is a numeric matrix, so strcmp is always false, find
% returned empty, and the perturbation was silently a no-op. Address the
% column by index instead.
pbOColumn = 7; % TODO(review): confirm which column of D2:P9 holds 氧化铅(PbO)
originalValue = unknownSamples(1, pbOColumn);
sensitivitySamples = unknownSamples;
sensitivitySamples(1, pbOColumn) = originalValue * 1.5; % increase PbO by 50%
sensitivityPredictions = predict(ct, sensitivitySamples);

% Report predictions and the sensitivity-analysis result
disp('Predicted Types:');
disp(predictedTypes);
disp('Sensitivity Analysis:');
disp(sensitivityPredictions);

%%
% 计算信息熵
function entropy = calculateEntropy(classLabels)
% Shannon entropy (in bits) of a vector of class labels.
% BUG FIX: the original hard-coded classes 1 and 2, but the labels used in
% this file are 11/12/21/22, so both probabilities were 0 and the result was
% NaN (0*log2(0) -> NaN). Compute over the actual distinct labels and skip
% zero probabilities so the p*log2(p) term correctly contributes 0.
    labels = unique(classLabels);
    total = numel(classLabels);
    entropy = 0;
    for k = 1:numel(labels)
        p = sum(classLabels == labels(k)) / total;
        if p > 0
            entropy = entropy - p * log2(p);
        end
    end
end
%%
% 计算信息增益
function infoGain = calculateInfoGain(dataMatrix, classLabels, columnName, classLabel) %#ok<INUSD>
% Information gain obtained by splitting on the column with index columnName:
% parent entropy minus the count-weighted entropy of each value's subset.
% (classLabel is accepted for interface compatibility but is not used.)
    featureValues = dataMatrix(:, columnName);
    n = numel(classLabels);
    infoGain = calculateEntropy(classLabels);
    for v = unique(featureValues).'
        mask = (featureValues == v);
        cnt = sum(mask);
        if cnt > 0
            % subtract this branch's weighted entropy
            infoGain = infoGain - (cnt / n) * calculateEntropy(classLabels(mask));
        end
    end
end

%%
% 选择最佳分裂属性
function [bestAttribute, bestGain] = selectBestAttributeToSplit(dataMatrix, classLabels)
% Return the column index of dataMatrix with the highest information gain,
% along with that gain. bestAttribute is -1 (and bestGain 0) when no column
% yields a positive gain.
% Cleanup vs. original: removed the unused locals `num` and `initialEntropy`,
% and stopped passing `bestGain` as calculateInfoGain's (unused) classLabel
% argument, which was misleading.
    bestAttribute = -1;
    bestGain = 0;
    for col = 1:size(dataMatrix, 2)
        gain = calculateInfoGain(dataMatrix, classLabels, col, []);
        if gain > bestGain
            bestGain = gain;
            bestAttribute = col;
        end
    end
end
%%
% 构建决策树
function decisionTree = buildTree(dataMatrix, classLabels, depth)
% Recursively build a decision tree.
%   Leaf:          a numeric scalar class label (the majority class).
%   Internal node: a struct with fields
%     .attribute - column index the node splits on
%     .value     - vector of the split attribute's distinct values
%     .children  - cell array of subtrees; children{i} handles value(i)
% depth is the remaining depth budget; growth stops when it reaches 0.
%
% BUG FIXES vs. original:
%   * the caller's depth limit is honored (the original compared against a
%     hard-coded 10, ignoring the depthLimit argument it was called with);
%   * leaves predict the majority class instead of arbitrary classLabels(1);
%   * children are stored in a cell array -- the original assigned numeric
%     leaves into a struct array (decisionTree(i) = ...), which raises a
%     struct/double conversion error at runtime;
%   * the .value field is actually populated;
%   * removed view(decisionTree,'Mode','graph'), which errors on a plain
%     struct (view is for ClassificationTree objects).
    if depth <= 0 || all(classLabels == classLabels(1))
        decisionTree = mode(classLabels);
        return;
    end

    [bestAttribute, bestGain] = selectBestAttributeToSplit(dataMatrix, classLabels);
    if bestGain == 0
        decisionTree = mode(classLabels);
        return;
    end

    uniqueValues = unique(dataMatrix(:, bestAttribute));
    children = cell(1, numel(uniqueValues));
    for i = 1:numel(uniqueValues)
        mask = dataMatrix(:, bestAttribute) == uniqueValues(i);
        children{i} = buildTree(dataMatrix(mask, :), classLabels(mask), depth - 1);
    end
    % Note the {children} wrapper: it stores the cell array as one field value.
    decisionTree = struct('attribute', bestAttribute, ...
                          'value', uniqueValues, ...
                          'children', {children});
end
%%
% 打印决策树
function printTree(decisionTree, level)
% Print a decision tree produced by buildTree (level tracks recursion depth).
% BUG FIX: the original read decisionTree(i).value, a field buildTree never
% populated, and recursed on struct-array elements while testing for leaves
% with isnumeric -- so internal nodes could never be descended correctly.
% This version handles numeric leaves and struct nodes whose subtrees live
% in a .children cell array, and guards the fields it reads.
    if isnumeric(decisionTree)
        fprintf('This node is a leaf and predicts %d\n', decisionTree);
        return;
    end
    fprintf( 'The decision at this node is attribute %d\n', decisionTree.attribute);
    if isfield(decisionTree, 'children')
        for i = 1:numel(decisionTree.children)
            fprintf('When attribute %d is %f\n', decisionTree.attribute, decisionTree.value(i));
            printTree(decisionTree.children{i}, level + 2);
        end
    end
end










