%% Initialization
% Clears the command window and workspace, then puts the dataset readers,
% ELM model code, and utility functions on the MATLAB search path.

clc;
clear;

% NOTE(review): addpath below uses the folder name 'dataSet' while the
% fullfile(...) loader calls later in this script use 'dataset' — confirm
% the actual on-disk folder name (matters on case-sensitive file systems).

% Data-reading code.
% FIX: use fullfile instead of hard-coded '\' separators so the script is
% portable to macOS/Linux (backslashes are Windows-only).
addpath(fullfile('dataSet', 'EEG'));
addpath(fullfile('dataSet', 'KDD_CUP99'));
addpath(fullfile('dataSet', 'data_code'));
addpath(fullfile('dataSet', 'UCI_industry_detection'));
% Model code
addpath(fullfile('ELM_model', 'GSCN'));
addpath(fullfile('ELM_model', 'gen_weight'));
addpath('ELM_model');
% Utility functions
addpath('tool_fun');
addpath(fullfile('tool_fun', 'JM_distance'));
addpath(fullfile('tool_fun', 'inverse_function'));




%% Dataset loading — exactly one loader should be active (uncommented) at a time.
% Each commented block below reads a different benchmark dataset into `data`
% via a project-local reader function. Currently the seeds dataset is active.

% % EEG data loading
% feature_file = fullfile('.', 'dataset', 'EEG', 'feature.xlsx');
% label_file = fullfile('.', 'dataset', 'EEG', 'label_profusion.txt');
% data = get_EEG_data(feature_file, label_file);

% KDD_CUP99 (intrusion-detection data; train/test normalized then concatenated)
% train_path = 'kd99_train.mat';
% test_path = 'kd99_test.mat';
% [norm_train_data, norm_test_data] = kdd99_process_data(train_path, test_path);
% data = [norm_train_data;norm_test_data];


% UCI industry data loading

% table_file = fullfile('.', 'dataset', 'UCI_industry_detection', 'faults.csv');
% data = get_UCI_industry_data(table_file);

% seeds data (ACTIVE)
% NOTE(review): path uses 'dataset' but the addpath calls at the top of this
% script use 'dataSet' — confirm the on-disk folder name/case.
txt_file = fullfile('.', 'dataset', 'classification','seeds', 'seeds_dataset.txt');
data = seeds_read(txt_file);

% wine data
% txt_file = fullfile('.', 'dataset', 'classification','wine', 'wine.data');
% data = wine_read(txt_file);

% Iris data
% txt_file = fullfile('.', 'dataset', 'classification','iris', 'iris.data');
% data = iris_read(txt_file);

% balance_scale_read data
% txt_file = fullfile('.', 'dataset', 'classification','balance', 'balance-scale.data');
% data = balance_scale_read(txt_file);

% spambase_read
% txt_file = fullfile('.', 'dataset', 'classification','spambase', 'spambase.data');
% data = spambase_read(txt_file);

% glass_read
% txt_file = fullfile('.', 'dataset', 'classification','glass', 'glass.data');
% data = glass_read(txt_file);

% usps_read
% txt_file = fullfile('.', 'dataset', 'classification','usps', 'USPS.mat');
% data = usps_read(txt_file);

% abalone_read
% txt_file = fullfile('.', 'dataset', 'classification','abalone', 'abalone.data');
% data = abalone_read(txt_file);

% Dry Bean
% txt_file = fullfile('.', 'dataset', 'classification','DryBeanDataset', 'Dry_Bean_Dataset.arff');
% data = dry_bean_read(txt_file);


% COIL-20
% txt_file = fullfile('.', 'dataset', 'classification','coil20', 'COIL20.mat');
% data = coil20_read(txt_file);

% Task-type flags — presumably consumed by the ELM model code as named
% constants (0 = regression, 1 = classification); TODO confirm against callers.
REGRESSION=0;
CLASSIFIER=1;

%% Iteration hyper-parameters and accuracy evaluation
% Sweeps the hidden-node count from node_begin to node_end in steps of
% node_interval, training node_train_number times per setting, and hands
% everything to the project-local model_compare for evaluation/plotting.

% First hidden-node count to evaluate
node_begin = 20;
% Last hidden-node count to evaluate
node_end = 40;
% Step between successive node counts
node_interval = 2;
% Number of repeated training runs per node count
node_train_number = 2;
% Human-readable experiment description passed to model_compare.
% FIX: the label said 'usps' although the active loader above reads the seeds
% dataset; also replaced the mathematical-italic Unicode glyphs (𝑄𝑅 𝐼𝐿𝑆𝑀)
% with plain ASCII so the label renders everywhere.
name_of_datasets = 'seeds Dataset datasets  matlab pinv VS QR ILSM';

% 0.9 — presumably the train-set fraction for the holdout split; TODO confirm
% against model_compare's signature.
model_compare(name_of_datasets,data,0.9,node_begin,node_end,node_interval,node_train_number);


%% seeds data

% txt_file = fullfile('.', 'dataset', 'classification','seeds', 'seeds_dataset.txt');
% data = seeds_read(txt_file);
% model_compare(name_of_datasets,data,0.9,node_begin,node_end,node_interval,node_train_number);









%% 下面是报废的测试代码 (deprecated scratch/test code, kept for reference)

% hiden_node_number = 100;
% num_neighbor = 20; % num_neighbor 应该大于 feature数的，但是求伪逆即可
% num_attain_node = 10;
% num_pool = 50;
% NO_pool_sample = 6;
% [TrainingTime, TestingTime, TrainingAccuracy, TestingAccuracy] = MY_CDDM_ELM(data_train_scale, data_test_scale,hiden_node_number,num_attain_node,num_neighbor,num_pool,NO_pool_sample)




% 下面是数据预处理 此时应该不用
% [data_train, data_test] = holdoutSplit(data_random, 0.7);
% 
% clear data_random;
% %处理原始数据
% [max_col,min_col,mean_col,data_train(:,2:end)] = average_scale(data_train(:,2:end));
% data_test(:,2:end) = (data_test(:,2:end) - mean_col)./ (max_col - min_col);
% 
% clear max_col min_col mean_col;
% 
% JM_aver_list = cal_JM_aver_list(data_train);
% 
% % 计算原始值的 softmax 概率分布 处理原始数据
% softmax_probabilities = softmax(JM_aver_list).*100;
% train_scale = data_train;
% test_scale = data_test;
% train_scale(:,2:end) = data_train(:,2:end) .* softmax_probabilities';
% test_scale(:,2:end) = data_test(:,2:end) .* softmax_probabilities';

% num_train = size(data_train,1);
% num_feature = size(data_train,2)-1;
% index = 1 : NO_pool_sample; % 创建索引数组
% InputWeight_count = 1;
% 
% InputWeight = zeros(hiden_node_number,num_feature);
% InputBias=zeros(hiden_node_number,1);
% d = dist((data_train(:,2:end)')); %distance between input points
% [~,is] = sort(d); %indices of the nearest neighbours
% Weight_pool = zeros(num_pool,num_feature);
% bias_pool =zeros(num_pool,1);
% sort_pool = zeros(num_pool,1);
% 
% while InputWeight_count <(hiden_node_number+1)
%     % 产生随机样本
%     [InputWeight_temp, InputBias_temp] = generate_my_weights(data_train, num_attain_node, is, num_neighbor);
%     % 对更新向量进行打分，并将更新信息加入权重池
%     [Weight_pool, bias_pool, sort_pool] = add_into_pool(InputWeight_temp, InputBias_temp, data_train, Weight_pool, bias_pool, sort_pool);
%     
%     [sorted_values, sorted_indices] = sort(sort_pool, 'descend'); % 从大到小排序并获取索引值
%     % 取前 NO_pool_sample 个分数为一个数组
%     sample_list =sorted_values(1:NO_pool_sample);
%     % 将原始数列转换为概率分布
%     prob_dist = sample_list / sum(sample_list);
%     % 进行采样
%     sampled_index = randsample(index, 1, true, prob_dist);
%     %将采样得到的权重放进矩阵中
%     InputWeight(InputWeight_count,:) = Weight_pool(sorted_indices(sampled_index),:);
%     InputBias(InputWeight_count,:) = bias_pool(sorted_indices(sampled_index),:);
%     
%     %调整权重池
%     Weight_pool = [Weight_pool( sorted_indices(1:(sampled_index-1)) ,: );Weight_pool( sorted_indices((sampled_index+1):num_pool+1),:) ];
%     bias_pool = [bias_pool( sorted_indices( 1:(sampled_index-1) ) );bias_pool( sorted_indices( (sampled_index+1):num_pool+1 ) )];
%     sort_pool = [sorted_values( 1:(sampled_index-1) );sorted_values( (sampled_index+1):num_pool+1 )];
%     
%     InputWeight_count = InputWeight_count + 1;
% end
% 
% Y = calculate_output(data_train, InputWeight, InputBias,T);
% Y_test = calculate_output(data_test, InputWeight, InputBias,T_test);
% 
% [TrainingAccuracy, TestingAccuracy] = calAccuracy_analysis(T, Y, T_test, Y_test)
% 


% if rem(1,size(data_train,1)) == 1
%     ig = randperm(size(data_train,1),hiden_node_number); %choose randomly x*-points
%     xg = X(:,ig); %x*
%     ik = is(1:k,ig); %indices of the k nearest neighbours of x*
% end


