function [T_test, T_test_pre, T_train_err] = modelforresid(trend, num_buchang, num_train_set, options, num_hourpoint)
    % MODELFORRESID  Train a TCN + bidirectional-GRU + self-attention network on
    % the trend series and return test targets, test predictions, and the
    % training-set residual series (used downstream for residual modelling).
    %
    % Inputs:
    %   trend         - trend-component series, one sample per row
    %   num_buchang   - sliding-window length: the previous num_buchang points
    %                   are used to predict the next point
    %   num_train_set - number of training "days" (groups of num_hourpoint points)
    %   options       - trainingOptions object forwarded to trainNetwork
    %   num_hourpoint - number of sample points per day
    %
    % Outputs:
    %   T_test      - test-set target values (transposed to a column on return)
    %   T_test_pre  - test-set predictions (transposed back to a row on return)
    %   T_train_err - training residuals (prediction - target), trimmed or
    %                 zero-padded to length num_train1test - num_hourpoint
    %
    % NOTE(review): T_test is returned as a column while T_test_pre is a row
    % (see the final transposes) — confirm callers expect these orientations.

    %% Train/test split and sliding-window construction
    num_train1test = num_train_set * num_hourpoint;
    data_train = trend(1:num_train1test, :);
    % Keep the last num_buchang training points so the first test window is complete.
    data_test = trend(num_train1test+1-num_buchang:end, :);

    % Window length num_buchang: the previous num_buchang power values predict
    % the next value (e.g. step 14 -> points 1..14 predict point 15).
    [data_shuru_train, data_shuchu_train] = data_process(data_train, num_buchang);
    data_train = [data_shuru_train, data_shuchu_train];

    [data_shuru_test, data_shuchu_test] = data_process(data_test, num_buchang);
    data_test = [data_shuru_test, data_shuchu_test];

    outdim = 1;                                 % last column is the target
    f_ = size(data_train, 2) - outdim;          % input feature dimension

    P_train = data_train(:, 1: f_)';
    T_train = data_train(:, f_ + 1: end)';
    num_train = size(P_train, 2);               % number of training samples

    P_test = data_test(:, 1: f_)';
    T_test = data_test(:, f_ + 1: end)';
    num_test = size(P_test, 2);                 % number of test samples

    %% Normalisation to [0, 1] (test data re-uses the training mapping)
    [p_train, ps_input] = mapminmax(P_train, 0, 1);
    p_test = mapminmax('apply', P_test, ps_input);

    [t_train, ps_output] = mapminmax(T_train, 0, 1);

    % Convert to the cell-per-sequence format expected by trainNetwork/predict.
    vp_train = mat2cell(p_train, size(p_train, 1), ones(1, num_train));
    vt_train = mat2cell(t_train, size(t_train, 1), ones(1, num_train));

    vp_test = mat2cell(p_test, size(p_test, 1), ones(1, num_test));

    numFeatures = size(p_train, 1);             %#ok<NASGU> % feature dimension (window length)

    %% Network hyperparameters
    filterSize = 3;         % convolution kernel size
    dropoutFactor = 0.1;    % spatial dropout probability
    numBlocks = 1;          % number of TCN residual blocks
    numFilters = 30;        % number of convolution filters
    NumNeurons = 128;       % GRU hidden units (per direction)

    layer = sequenceInputLayer(f_, Normalization = "rescale-symmetric", Name = "input");

    % Build the layer graph starting from the input layer.
    lgraph = layerGraph(layer);
    outputName = layer.Name;

    % TCN residual blocks with a forward and a time-reversed (flip) branch.
    for i = 1 : numBlocks

        dilationFactor = 2^(i-1);   % dilation doubles per block (1, 2, 4, ...)

        % Forward TCN branch.
        layers = [
            convolution1dLayer(filterSize, numFilters, DilationFactor = dilationFactor, Padding = "causal", Name="conv1_" + i)  % dilated causal conv
            layerNormalizationLayer                                                                                             % layer norm
            spatialDropoutLayer(dropoutFactor)                                                                                  % spatial dropout
            convolution1dLayer(filterSize, numFilters, DilationFactor = dilationFactor, Padding = "causal")                     % dilated causal conv
            layerNormalizationLayer                                                                                             % layer norm
            reluLayer                                                                                                           % activation
            spatialDropoutLayer(dropoutFactor)                                                                                  % spatial dropout
            additionLayer(4, Name = "add_" + i)                                                                                 % merge 4 branches
        ];

        % Add the residual block and attach it to the current graph output.
        lgraph = addLayers(lgraph, layers);
        lgraph = connectLayers(lgraph, outputName, "conv1_" + i);

        % Reverse (flipped) TCN branch; convSkip_i also feeds the addition
        % directly as a 1x1-conv skip within this branch.
        Fliplayers = [
            FlipLayer("flip_" + i)                                                                                               % reverse the sequence in time
            convolution1dLayer(1, numFilters, Name = "convSkip_"+i);                                                             % 1x1 conv (skip source)
            convolution1dLayer(filterSize, numFilters, DilationFactor = dilationFactor, Padding = "causal", Name="conv2_" + i)   % dilated causal conv
            layerNormalizationLayer                                                                                              % layer norm
            spatialDropoutLayer(dropoutFactor)                                                                                   % spatial dropout
            convolution1dLayer(filterSize, numFilters, DilationFactor = dilationFactor, Padding = "causal")                      % dilated causal conv
            layerNormalizationLayer                                                                                              % layer norm
            reluLayer                                                                                                            % activation
            spatialDropoutLayer(dropoutFactor, Name="drop" + i)                                                                  % spatial dropout
        ];

        lgraph = addLayers(lgraph, Fliplayers);

        % Wire the flip branch and its internal skip into the addition layer.
        lgraph = connectLayers(lgraph, outputName, "flip_" + i);
        lgraph = connectLayers(lgraph, "drop" + i, "add_" + i + "/in3");
        lgraph = connectLayers(lgraph, "convSkip_"+i, "add_" + i + "/in4");

        % Main residual connection: the first block needs a 1x1 conv to match
        % channel counts; later blocks connect the identity directly.
        if i == 1
            layer = convolution1dLayer(1, numFilters, Name="convSkip");

            lgraph = addLayers(lgraph, layer);
            lgraph = connectLayers(lgraph, outputName, "convSkip");
            lgraph = connectLayers(lgraph, "convSkip", "add_" + i + "/in2");
        else
            lgraph = connectLayers(lgraph, outputName, "add_" + i + "/in2");
        end

        % The block's addition output feeds the next block / the head.
        outputName = "add_" + i;
    end

    % Head: flatten -> bidirectional GRU (forward + flipped) -> self-attention -> FC.
    tempLayers = flattenLayer("Name","flatten");
    lgraph = addLayers(lgraph, tempLayers);

    tempLayers = gruLayer(NumNeurons, "Name", "gru1");
    lgraph = addLayers(lgraph, tempLayers);

    tempLayers = [
        FlipLayer("flip3")
        gruLayer(NumNeurons, "Name", "gru2")];
    lgraph = addLayers(lgraph, tempLayers);

    tempLayers = [
        concatenationLayer(1, 2, "Name", "concat")
        selfAttentionLayer(1, 50, "Name", "selfattention")   % attention mechanism
        fullyConnectedLayer(outdim, "Name", "fc")
        regressionLayer("Name", "regressionoutput")];
    lgraph = addLayers(lgraph, tempLayers);

    lgraph = connectLayers(lgraph, outputName, "flatten");
    lgraph = connectLayers(lgraph, "flatten", "gru1");
    lgraph = connectLayers(lgraph, "flatten", "flip3");
    lgraph = connectLayers(lgraph, "gru1", "concat/in1");
    lgraph = connectLayers(lgraph, "gru2", "concat/in2");

    %% Training and prediction
    tic
    net = trainNetwork(vp_train, vt_train, lgraph, options);
    toc
    % analyzeNetwork(net); % inspect the network structure

    t_train_pre = predict(net, vp_train);
    t_test_pre = predict(net, vp_test);

    % Undo the [0, 1] normalisation.
    T_train_pre = mapminmax('reverse', t_train_pre, ps_output);
    T_test_pre = mapminmax('reverse', t_test_pre, ps_output);

    % cell -> numeric array (one prediction per cell).
    T_train_pre = cell2mat(T_train_pre);
    T_test_pre = cell2mat(T_test_pre);

    T_train_pre = T_train_pre';
    T_test_pre = T_test_pre';

    disp('…………训练集误差指标…………')
    [mae1, rmse1, mape1, error1] = calc_error(T_train_pre, T_train); %#ok<ASGLU>
    fprintf('\n')
    disp('…………测试集误差指标…………')
    [mae2, rmse2, mape2, error2] = calc_error(T_test_pre, T_test); %#ok<ASGLU>
    fprintf('\n')

    % figure('Position',[650,400,600,200]);hold on;
    % plot(T_test_pre, 'r-','lineWidth', 1);hold on;
    % plot(T_test, 'b-','lineWidth', 1);hold on;
    % title('Trend测试集预测结果')
    % hold off;

    T_test_pre = T_test_pre';
    T_test = T_test';

    % BUG FIX: T_train_pre is a column here (transposed above) while T_train is
    % a row; subtracting them directly triggers implicit expansion and yields an
    % N-by-N matrix instead of a residual vector (and the zero-padding branch
    % below would then error). Transpose so the result is a 1-by-N row.
    T_train_err = T_train_pre' - T_train;

    % Align the residual series length to num_train1test - num_hourpoint.
    if num_buchang < num_hourpoint
        % Drop the surplus trailing residuals.
        T_train_err(:, num_train1test - num_hourpoint + 1:end) = [];
    elseif num_buchang > num_hourpoint
        % Pad with zeros up to the target length.
        T_train_err = [T_train_err, zeros(1, num_buchang - num_hourpoint)];
    end

end



