classdef model_cifar10
    properties
        % Model parameters loaded from disk by load_parameters():
        %   weights        - struct with fields layer_1..layer_6, each holding
        %                    'weights' (numeric array) and 'shape' (1xN vector)
        %   tensor_details - per-tensor quantization info parsed from
        %                    tensor_details_cifar10.txt (shape/scale/zero_point)
        %   model_dir      - directory containing the exported model files
        weights = struct()
        tensor_details = struct()
        model_dir % directory the parameter files are read from
    end
    
    methods
        function obj = model_cifar10(model_directory)
            % Constructor: remember where the model files live, then load
            % all parameters. When no directory is given, fall back to the
            % bundled default export path.
            if nargin >= 1
                obj.model_dir = model_directory;
            else
                % Default export directory.
                obj.model_dir = 'd3402558e7f8076bcc362b636a581683_c6ee4c0d57b8d2f4376fd489248dda2d_8/cnn_cifar10/';
            end
            obj = load_parameters(obj);
        end
        
        function obj = load_parameters(obj)
            % Populate obj.tensor_details and obj.weights from obj.model_dir.
            % Raises an error if any expected file is missing.

            % Quantization metadata for every tensor.
            td_file = fullfile(obj.model_dir, 'tensor_details_cifar10.txt');
            if ~exist(td_file, 'file')
                error('Cannot find tensor details file: %s', td_file);
            end
            obj.tensor_details = obj.parse_tensor_details(td_file);

            % Weight files in network order (five conv layers, then dense).
            names = { ...
                'tfl.pseudo_qconst33_weights_cifar10.txt', ...  % conv layer 1
                'tfl.pseudo_qconst29_weights_cifar10.txt', ...  % conv layer 2
                'tfl.pseudo_qconst17_weights_cifar10.txt', ...  % conv layer 3
                'tfl.pseudo_qconst4_weights_cifar10.txt', ...   % conv layer 4
                'tfl.pseudo_qconst3_weights_cifar10.txt', ...   % conv layer 5
                'tfl.pseudo_qconst1_weights_cifar10.txt'};      % fully-connected layer

            for k = 1:numel(names)
                fpath = fullfile(obj.model_dir, names{k});
                if ~exist(fpath, 'file')
                    error('Cannot find weight file: %s', fpath);
                end
                [w, shp] = obj.load_weight_file(fpath);
                % Stored under layer_1..layer_6, matching network order.
                obj.weights.(sprintf('layer_%d', k)) = struct('weights', w, 'shape', shp);
            end
        end
        
        function [weights, shape] = load_weight_file(~, filename)
            % Read one exported weight file.
            %
            % File layout (three header lines, then one value per line):
            %   line 1: layer name            (ignored)
            %   line 2: "Shape: [a, b, ...]"  (or "(a, b, ...)")
            %   line 3: "Weights:"            (ignored)
            %   rest  : one numeric weight per line, possibly with a
            %           trailing ".000000" suffix
            %
            % Returns:
            %   weights - numeric array reshaped to 'shape'
            %   shape   - 1xN numeric vector parsed from the header
            %
            % Errors if the file cannot be opened, the shape header is
            % malformed, or the value count does not match the shape.
            fid = fopen(filename, 'r');
            if fid == -1
                error('Cannot open file: %s', filename);
            end
            
            try
                % Header lines.
                layer_line = fgetl(fid); %#ok<NASGU> % layer name, unused
                shape_line = fgetl(fid);
                weights_line = fgetl(fid); %#ok<NASGU> % "Weights:" marker, unused
                
                % Parse the shape, accepting [..] or (..) delimiters.
                shape_str = regexp(shape_line, 'Shape: \[(.*?)\]', 'tokens');
                if isempty(shape_str)
                    shape_str = regexp(shape_line, 'Shape: \((.*?)\)', 'tokens');
                end
                
                if ~isempty(shape_str)
                    % Strip spaces and stray parentheses before splitting.
                    shape_str = strrep(shape_str{1}{1}, ' ', '');
                    shape_str = strrep(shape_str, '(', '');
                    shape_str = strrep(shape_str, ')', '');
                    % Split on commas and drop empty fragments
                    % (e.g. from a trailing comma).
                    shape_parts = regexp(shape_str, ',', 'split');
                    shape_parts = shape_parts(~cellfun('isempty', shape_parts));
                    
                    shape = zeros(1, length(shape_parts));
                    for i = 1:length(shape_parts)
                        shape(i) = str2double(shape_parts{i});
                    end
                    
                    % MATLAB arrays are at least 2-D: promote a 1-D shape
                    % to a 1xN row shape.
                    if length(shape) == 1
                        shape = [1, shape(1)];
                    end
                else
                    error('Invalid shape format in file: %s', filename);
                end
                
                % Read the weight values. Preallocate to the expected size
                % (element-by-element growth is quadratic); the buffer still
                % auto-grows if the file holds extra values, so the count
                % check below reports the true total.
                expected_size = prod(shape);
                weights = zeros(1, expected_size);
                count = 0;
                while ~feof(fid)
                    line = fgetl(fid);
                    % fgetl returns the double -1 at end-of-file, and this
                    % can happen here even though feof() was false before
                    % the read (file ending in a newline) -- guard with
                    % ischar so strtrim never receives a numeric value.
                    if ischar(line) && ~isempty(line)
                        line = strtrim(line);
                        line = regexprep(line, '\.000000$', '');  % drop ".000000" suffix
                        val = str2double(line);
                        if ~isnan(val)
                            count = count + 1;
                            weights(count) = val;
                        end
                    end
                end
                weights = weights(1:count);
                
                % The value count must match the declared shape exactly.
                if length(weights) ~= expected_size
                    error('Weight count (%d) does not match shape %s (expected %d) in file: %s', ...
                        length(weights), mat2str(shape), expected_size, filename);
                end
                
                % Reshape to the declared dimensions.
                % NOTE(review): reshape fills in MATLAB column-major order,
                % while TFLite exports are typically row-major -- confirm
                % the writer's ordering if multi-dimensional weights look
                % transposed.
                weights = reshape(weights, shape);
                % (The original additionally called squeeze() on 1xN
                % results; squeeze does not affect 2-D arrays, so that was
                % a no-op and has been removed.)
                
                % Debug summary.
                fprintf('Loaded weights from %s\n', filename);
                fprintf('  Shape: %s\n', mat2str(shape));
                fprintf('  Weight range: [%f, %f]\n', min(weights(:)), max(weights(:)));
                fprintf('  Weight count: %d\n\n', numel(weights));
                
            catch ME
                % Always release the handle before propagating the error.
                fclose(fid);
                rethrow(ME);
            end
            
            fclose(fid);
        end
        
        function details = parse_tensor_details(~, filename)
            % Parse the tensor-details text dump (a Python-repr-like list
            % of dicts) into a struct keyed by sanitized tensor name.
            %
            % Each entry gets fields:
            %   shape      - 1xN numeric vector ([1, 1] when absent)
            %   scale      - quantization scale (1.0 when absent)
            %   zero_point - quantization zero point (0 when absent)
            %
            % Records without a 'name' field are silently skipped.
            fid = fopen(filename, 'r');
            if fid == -1
                error('Cannot open file: %s', filename);
            end
            
            % Slurp the whole file as characters.
            content = fscanf(fid, '%c', inf);
            fclose(fid);
            
            % Split into one "{...}" record per tensor.
            % NOTE(review): assumes no nested braces inside a record.
            records = regexp(content, '{[^}]+}', 'match');
            
            details = struct();
            for i = 1:length(records)
                % Parse one record.
                record = records{i};
                
                % Extract the tensor name.
                name_match = regexp(record, '''name'': ''([^'']+)''', 'tokens');
                if ~isempty(name_match)
                    name = name_match{1}{1};
                    
                    % Extract the shape from "'shape': array([...])".
                    shape_match = regexp(record, '''shape'': array\(\[(.*?)\]\)', 'tokens');
                    if ~isempty(shape_match)
                        shape_str = shape_match{1}{1};
                        % Remove whitespace before splitting.
                        shape_str = strrep(shape_str, ' ', '');
                        % Split on commas.
                        shape_parts = regexp(shape_str, ',', 'split');
                        shape = zeros(1, length(shape_parts));
                        for j = 1:length(shape_parts)
                            shape(j) = str2double(shape_parts{j});
                        end
                    else
                        shape = [1, 1]; % default shape when none is recorded
                    end
                    
                    % Extract "'quantization': (scale, zero_point)".
                    quant_match = regexp(record, '''quantization'': \(([\d\.-]+),\s*([\d\.-]+)\)', 'tokens');
                    if ~isempty(quant_match)
                        scale = str2double(quant_match{1}{1});
                        zero_point = str2double(quant_match{1}{2});
                    else
                        % Fall back to the quantization_parameters arrays.
                        % Only the first scale/zero_point is kept, i.e.
                        % per-channel quantization collapses to per-tensor.
                        scale_match = regexp(record, '''scales'':\s*array\(\[([\d\.-e,\s]+)\]', 'tokens');
                        if ~isempty(scale_match)
                            scale_str = scale_match{1}{1};
                            scale_parts = regexp(scale_str, ',', 'split');
                            scale = str2double(scale_parts{1}); % first scale value only
                        else
                            scale = 1.0;
                        end
                        
                        zp_match = regexp(record, '''zero_points'':\s*array\(\[([\d\.-e,\s]+)\]', 'tokens');
                        if ~isempty(zp_match)
                            zp_str = zp_match{1}{1};
                            zp_parts = regexp(zp_str, ',', 'split');
                            zero_point = str2double(zp_parts{1}); % first zero_point value only
                        else
                            zero_point = 0;
                        end
                    end
                    
                    % Sanitize the tensor name into a valid field name
                    % (this is the name the layer methods look up).
                    valid_name = matlab.lang.makeValidName(name);
                    
                    % Record this tensor's metadata.
                    details.(valid_name) = struct(...
                        'shape', shape, ...
                        'scale', scale, ...
                        'zero_point', zero_point);
                    
                    % Debug trace.
                    fprintf('Parsed layer: %s\n', valid_name);
                    fprintf('  Shape: [%s]\n', sprintf('%d ', shape));
                    fprintf('  Scale: %f\n', scale);
                    fprintf('  Zero point: %d\n\n', zero_point);
                end
            end
        end
        
        function output = predict(obj, input)
            % Run the quantized forward pass on a single image.
            %
            % input  - int8 array of size 32x32x3 (quantized CIFAR-10 image)
            % output - int8 logits from the dense layer, or [] if any stage
            %          raised an error (the error is printed, not rethrown)
            %
            % NOTE(review): load_parameters loads six weight files, but this
            % pipeline applies only conv blocks 1 and 2 before pooling and
            % the dense layer -- confirm whether blocks 3-5 are
            % intentionally omitted.
            try
                % Validate the quantized input.
                assert(isa(input, 'int8'), 'Input must be int8');
                assert(all(size(input) == [32, 32, 3]), 'Input must be 32x32x3');
                
                fprintf('\n=== Starting prediction ===\n');
                fprintf('Input shape: %s\n', mat2str(size(input)));
                fprintf('Input range: [%d, %d]\n\n', min(input(:)), max(input(:)));
                
                % Add a leading batch dimension (column-major reshape keeps
                % the (h,w,c) element order intact).
                x = reshape(input, [1, 32, 32, 3]);
                
                % First depthwise-separable block.
                fprintf('=== Block 1 ===\n');
                fprintf('Depthwise Conv2D...\n');
                x = obj.depthwise_conv2d(x, 1);
                fprintf('Batch Normalization...\n');
                x = obj.batch_normalize(x, 1);
                fprintf('Pointwise Conv2D...\n');
                x = obj.pointwise_conv2d(x, 1);
                fprintf('Block 1 output range: [%d, %d]\n\n', min(x(:)), max(x(:)));
                
                % Second depthwise-separable block (depthwise stride = 2).
                fprintf('=== Block 2 ===\n');
                fprintf('Depthwise Conv2D (stride=2)...\n');
                x = obj.depthwise_conv2d(x, 2);
                fprintf('Batch Normalization...\n');
                x = obj.batch_normalize(x, 2);
                fprintf('Pointwise Conv2D...\n');
                x = obj.pointwise_conv2d(x, 2);
                fprintf('Block 2 output range: [%d, %d]\n\n', min(x(:)), max(x(:)));
                
                % Global average pooling over the spatial dimensions.
                fprintf('=== Global Average Pooling ===\n');
                x = obj.global_average_pooling(x);
                fprintf('GAP output range: [%d, %d]\n\n', min(x(:)), max(x(:)));
                
                % Dense layer producing the class logits.
                fprintf('=== Fully Connected Layer ===\n');
                output = obj.fully_connected(x);
                
                % Final output summary.
                fprintf('\n=== Final Output ===\n');
                fprintf('Shape: %s\n', mat2str(size(output)));
                fprintf('Range: [%d, %d]\n', min(output(:)), max(output(:)));
                fprintf('Values: %s\n', mat2str(output));
                
            catch ME
                % Errors are reported, not rethrown; callers get [].
                fprintf('Error in prediction: %s\n', ME.message);
                fprintf('Stack trace:\n');
                disp(ME.stack);
                output = [];
            end
        end
        
        function out = depthwise_conv2d(obj, input, layer_idx)
            % Quantized depthwise 2-D convolution with SAME-style padding
            % and ReLU.
            %
            % input     - int8 tensor (batch, H, W, C)
            % layer_idx - 1..3; selects weights, quantization entries, and
            %             stride (2 for layer 2, otherwise 1)
            % out       - int8 tensor (batch, H', W', C)
            %
            % On any error the input is returned unchanged with a warning.
            % NOTE(review): for stride 2 that fallback has the wrong shape
            % for downstream layers -- consider rethrowing instead.
            try
                % Weights for this layer (loaded by load_parameters).
                weights = obj.weights.(sprintf('layer_%d', layer_idx)).weights;
                
                % Map the layer index to tensor-detail entry names.
                if layer_idx == 1
                    input_layer_name = 'serving_default_input_layer_0';
                    weight_layer_name = 'tfl_pseudo_qconst33';
                    output_layer_name = 'sequential_1_depthwise_conv2d_1_Relu_sequential_1_depthwise_con';
                elseif layer_idx == 2
                    input_layer_name = 'sequential_1_batch_normalization_1_batchnorm_add_1';
                    weight_layer_name = 'tfl_pseudo_qconst29';
                    output_layer_name = 'sequential_1_depthwise_conv2d_1_2_Relu_sequential_1_depthwise_c';
                elseif layer_idx == 3
                    input_layer_name = 'sequential_1_batch_normalization_1_2_batchnorm_add_1';
                    weight_layer_name = 'tfl_pseudo_qconst17';
                    output_layer_name = 'sequential_1_depthwise_conv2d_2_1_Relu_sequential_1_depthwise_c';
                else
                    error('Unsupported layer index: %d', layer_idx);
                end
                
                % Quantization parameters. NOTE(review): weight_params.scale
                % is only printed below; the kernel values are used as-is
                % (treated as already-dequantized floats) -- confirm against
                % the exporter, and contrast with fully_connected which
                % rescales its weights.
                input_params = obj.tensor_details.(input_layer_name);
                weight_params = obj.tensor_details.(weight_layer_name);
                output_params = obj.tensor_details.(output_layer_name);
                
                % Input/kernel dimensions.
                [batch_size, in_height, in_width, in_channels] = size(input);
                [kernel_h, kernel_w, channels, ~] = size(weights);
                
                % Stride: 2 only for the second block.
                if layer_idx == 2
                    stride = 2;
                else
                    stride = 1;
                end
                
                % Symmetric padding for SAME-style output size.
                pad_h = floor((kernel_h - 1) / 2);
                pad_w = floor((kernel_w - 1) / 2);
                
                % Output spatial dimensions.
                out_height = floor((in_height + 2*pad_h - kernel_h) / stride) + 1;
                out_width = floor((in_width + 2*pad_w - kernel_w) / stride) + 1;
                
                % Preallocate output (channel count is preserved by a
                % depthwise conv). Accumulated as int32, saturated later.
                out = zeros(batch_size, out_height, out_width, in_channels, 'int32');
                
                % Zero-pad the input spatially.
                padded_input = int32(padarray(input, [pad_h pad_w], 0, 'both'));
                
                % Debug info.
                fprintf('Debug info:\n');
                fprintf('  Padded input shape: %s\n', mat2str(size(padded_input)));
                fprintf('  Kernel shape: %s\n', mat2str(size(weights)));
                fprintf('  Stride: %d\n', stride);
                
                % Quantization parameters in play.
                fprintf('Quantization parameters:\n');
                fprintf('  Input: scale=%f, zero_point=%d\n', input_params.scale, input_params.zero_point);
                fprintf('  Weight: scale=%f, zero_point=%d\n', weight_params.scale, weight_params.zero_point);
                fprintf('  Output: scale=%f, zero_point=%d\n', output_params.scale, output_params.zero_point);
                
                % Naive depthwise convolution: one kernel per channel.
                for b = 1:batch_size
                    for c = 1:in_channels
                        for i = 1:out_height
                            for j = 1:out_width
                                % Top-left corner of the receptive field.
                                start_i = (i-1)*stride + 1;
                                start_j = (j-1)*stride + 1;
                                
                                % Clip the window at the padded border.
                                end_i = min(start_i+kernel_h-1, size(padded_input,2));
                                end_j = min(start_j+kernel_w-1, size(padded_input,3));
                                
                                if start_i > end_i || start_j > end_j
                                    continue;  % skip degenerate positions
                                end
                                
                                % Window and the matching (possibly
                                % truncated) kernel slice for this channel.
                                window = padded_input(b, start_i:end_i, start_j:end_j, c);
                                kernel = int32(weights(1:end_i-start_i+1, 1:end_j-start_j+1, c, 1));
                                
                                % Dequantize the input window.
                                window_float = (double(window) - double(input_params.zero_point)) * input_params.scale;
                                
                                % Kernel values used directly as floats
                                % (see NOTE above about the unused scale).
                                kernel_float = double(kernel);
                                
                                % Accumulate the dot product.
                                conv_result = sum(window_float(:) .* kernel_float(:));
                                
                                % Requantize onto the output tensor's grid.
                                quantized_result = int32(round(conv_result / output_params.scale) + output_params.zero_point);
                                
                                % Saturate to the int8 range.
                                quantized_result = min(max(quantized_result, -128), 127);
                                
                                % ReLU in the quantized domain: quantized
                                % zero is the output zero_point.
                                if quantized_result < output_params.zero_point
                                    quantized_result = output_params.zero_point;
                                end
                                
                                out(b,i,j,c) = int8(quantized_result);
                            end
                        end
                    end
                end
                
                % NOTE(review): these prints use window_float/kernel_float/
                % conv_result from the LAST loop iteration only, and are
                % undefined (raising into the catch below) if the loop body
                % never ran.
                fprintf('Convolution debug info:\n');
                fprintf('  Input range: [%f, %f]\n', min(window_float(:)), max(window_float(:)));
                fprintf('  Kernel range: [%f, %f]\n', min(kernel_float(:)), max(kernel_float(:)));
                fprintf('  Conv result range: [%f, %f]\n', min(conv_result(:)), max(conv_result(:)));
                
                % Saturate again and convert to int8.
                out = int8(min(max(out, -128), 127));
                
                % NOTE(review): second ReLU, this time clamping at raw 0
                % rather than at the output zero_point as the in-loop clamp
                % did -- the two disagree whenever zero_point ~= 0; confirm
                % which is intended.
                out = max(out, int8(0));
                
                % Layer summary.
                fprintf('Depthwise conv2d layer %d:\n', layer_idx);
                fprintf('  Input shape: %s\n', mat2str(size(input)));
                fprintf('  Weight shape: %s\n', mat2str(size(weights)));
                fprintf('  Output shape: %s\n', mat2str(size(out)));
                fprintf('  Output range: [%d, %d]\n\n', min(out(:)), max(out(:)));
                
            catch ME
                warning('Error in depthwise_conv2d: %s\nReturning input unchanged.', ME.message);
                out = input;
            end
        end
        
        function out = batch_normalize(obj, input, layer_idx)
            % Re-quantize activations between a depthwise conv and the
            % following pointwise conv. At inference time batch norm is an
            % affine transform (out = alpha*x + beta with frozen alpha/beta),
            % but this implementation currently applies only the identity in
            % float space: dequantize with the producer's parameters, then
            % requantize with this op's output parameters. The learned
            % gamma/beta are not yet applied.
            try
                % Map the layer index to tensor-detail entry names.
                switch layer_idx
                    case 1
                        src = 'sequential_1_depthwise_conv2d_1_Relu_sequential_1_depthwise_con';
                        dst = 'sequential_1_batch_normalization_1_batchnorm_add_1';
                    case 2
                        src = 'sequential_1_depthwise_conv2d_1_2_Relu_sequential_1_depthwise_c';
                        dst = 'sequential_1_batch_normalization_1_2_batchnorm_add_1';
                    otherwise
                        error('Unsupported batch norm layer index: %d', layer_idx);
                end

                in_q = obj.tensor_details.(src);
                out_q = obj.tensor_details.(dst);

                fprintf('Batch normalization layer %d:\n', layer_idx);
                fprintf('  Input shape: %s\n', mat2str(size(input)));

                % Dequantize -> (identity) -> requantize -> saturate.
                x = (double(input) - double(in_q.zero_point)) * in_q.scale;
                y = x;  % placeholder: real BN scale/shift would go here
                out = int8(min(max(round(y / out_q.scale) + out_q.zero_point, -128), 127));

                fprintf('  Output shape: %s\n', mat2str(size(out)));
                fprintf('  Output range: [%d, %d]\n\n', min(out(:)), max(out(:)));

            catch ME
                warning('Error in batch_normalize: %s\nReturning input unchanged.', ME.message);
                out = input;
            end
        end
        
        function out = pointwise_conv2d(obj, input, layer_idx)
            % Quantized 1x1 ("pointwise") convolution with ReLU.
            %
            % input     - int8 tensor (batch, H, W, Cin)
            % layer_idx - 1 or 2; selects weights and quantization entries
            % out       - int8 tensor (batch, H, W, Cout)
            %
            % Dequantizes each input pixel, applies the float weights, then
            % requantizes with the output tensor's parameters. On any error
            % the input is returned unchanged (with a warning).
            try
                % Weights for this block (loaded by load_parameters).
                weights = obj.weights.(sprintf('layer_%d', layer_idx)).weights;
                
                % Map the layer index to tensor-detail entry names.
                if layer_idx == 1
                    input_layer_name = 'sequential_1_batch_normalization_1_batchnorm_add_1';
                    weight_layer_name = 'tfl_pseudo_qconst29';
                    output_layer_name = 'sequential_1_conv2d_1_Relu_sequential_1_conv2d_1_add_sequential';
                elseif layer_idx == 2
                    input_layer_name = 'sequential_1_batch_normalization_1_2_batchnorm_add_1';
                    weight_layer_name = 'tfl_pseudo_qconst17';
                    output_layer_name = 'sequential_1_conv2d_1_2_Relu_sequential_1_conv2d_1_2_add_sequen';
                else
                    error('Unsupported pointwise conv layer index: %d', layer_idx);
                end
                
                input_params = obj.tensor_details.(input_layer_name);
                % NOTE(review): weight_params is fetched but never used --
                % the weights are treated as already-dequantized floats
                % (as in depthwise_conv2d); confirm against the exporter.
                weight_params = obj.tensor_details.(weight_layer_name); %#ok<NASGU>
                output_params = obj.tensor_details.(output_layer_name);
                
                % Dimensions.
                [batch_size, in_height, in_width, in_channels] = size(input); %#ok<ASGLU>
                [out_channels, ~, ~, ~] = size(weights);
                
                % Accumulate as int32, saturate later.
                out = zeros(batch_size, in_height, in_width, out_channels, 'int32');
                
                fprintf('Pointwise conv2d layer %d:\n', layer_idx);
                fprintf('  Input shape: %s\n', mat2str(size(input)));
                fprintf('  Weight shape: %s\n', mat2str(size(weights)));
                
                % Hoist loop invariants: the per-output-channel weight
                % vector does not depend on the pixel position, and the
                % dequantized input pixel does not depend on the output
                % channel. Same arithmetic as the naive quadruple loop,
                % computed once instead of repeatedly.
                weight_vecs = cell(1, out_channels);
                for oc = 1:out_channels
                    wf = double(squeeze(weights(oc,:,:,:)));
                    weight_vecs{oc} = wf(:);
                end
                in_zero = double(input_params.zero_point);
                in_scale = input_params.scale;
                
                for b = 1:batch_size
                    for h = 1:in_height
                        for w = 1:in_width
                            % Dequantize this pixel's channel vector once.
                            in_pixel = squeeze(input(b,h,w,:));
                            in_float = (double(in_pixel) - in_zero) * in_scale;
                            in_vec = in_float(:);
                            for oc = 1:out_channels
                                % Dot product with this channel's kernel.
                                conv_result = sum(in_vec .* weight_vecs{oc});
                                % Requantize onto the output tensor's grid.
                                out(b,h,w,oc) = int32(round(conv_result / output_params.scale) + output_params.zero_point);
                            end
                        end
                    end
                end
                
                % Saturate to int8.
                out = int8(min(max(out, -128), 127));
                
                % ReLU. NOTE(review): clamping at raw 0 assumes the output
                % zero_point is 0; verify against the exported parameters.
                out = max(out, int8(0));
                
                fprintf('  Output shape: %s\n', mat2str(size(out)));
                fprintf('  Output range: [%d, %d]\n\n', min(out(:)), max(out(:)));
                
            catch ME
                warning('Error in pointwise_conv2d: %s\nReturning input unchanged.', ME.message);
                out = input;
            end
        end
        
        function out = global_average_pooling(obj, input)
            % Global average pooling over the spatial dimensions.
            % Dequantizes the input, averages each channel's HxW plane,
            % then requantizes to int8 with this op's output parameters.
            try
                % Quantization parameters of the producer and of this op.
                in_q = obj.tensor_details.sequential_1_conv2d_4_1_Relu_sequential_1_conv2d_4_1_add_sequen;
                out_q = obj.tensor_details.sequential_1_global_average_pooling2d_1_Mean;
                
                % Dequantize the whole tensor up front.
                x = (double(input) - double(in_q.zero_point)) * in_q.scale;
                
                % Average every channel plane.
                [nb, h, w, nc] = size(x);
                pooled = zeros(nb, nc);
                for b = 1:nb
                    for c = 1:nc
                        plane = x(b,:,:,c);
                        pooled(b,c) = sum(plane(:)) / (h * w);
                    end
                end
                
                fprintf('  Float range before scaling: [%f, %f]\n', min(pooled(:)), max(pooled(:)));
                
                % NOTE(review): this data-dependent rescaling (fit the batch
                % into ~90% of the int8 span) is a saturation workaround; it
                % means the output no longer lies on out_q's declared
                % quantization grid, which downstream layers assume.
                peak = max(abs(pooled(:)));
                gain = 1.0;
                if peak > 0
                    gain = (127.0 / peak) * 0.9;  % keep 10% headroom
                end
                pooled = pooled * gain;
                
                % Requantize and saturate to int8.
                scaled = pooled / out_q.scale;
                fprintf('  Scaled range: [%f, %f]\n', min(scaled(:)), max(scaled(:)));
                
                quantized = round(scaled) + out_q.zero_point;
                out = int8(min(max(quantized, -128), 127));
                
                fprintf('  Output shape: %s\n', mat2str(size(out)));
                fprintf('  Output range: [%d, %d]\n\n', min(out(:)), max(out(:)));
            catch ME
                warning('Error in global_average_pooling: %s', ME.message);
                out = input;
            end
        end
        
        function out = fully_connected(obj, input)
            % Dense layer producing the class logits.
            %
            % input - int8 row vector from global_average_pooling (1 x C)
            % out   - int8 logits (1 x num_classes), or the input unchanged
            %         if an error occurred (with a warning)
            try
                % Dense weights (layer_6) and quantization parameters.
                weights = obj.weights.layer_6.weights;
                input_params = obj.tensor_details.sequential_1_global_average_pooling2d_1_Mean;
                output_params = obj.tensor_details.sequential_1_dense_1_2_MatMul_sequential_1_dense_1_2_Add;
                
                % Dequantize the input.
                input_float = (double(input) - double(input_params.zero_point)) * input_params.scale;
                
                % NOTE(review): this "symmetric dequantization" MULTIPLIES
                % the weights by max|w|/127 rather than using the exported
                % weight scale from tensor_details; it also contradicts the
                % conv layers, which use the weights as-is. Confirm whether
                % the weight file holds int8 codes or floats and fix
                % accordingly.
                weight_float = double(weights);
                weight_max = max(abs(weight_float(:)));
                weight_scale = weight_max / 127.0;  % symmetric-quantization step
                weight_float = weight_float * weight_scale;
                
                % Logits in float: (1 x C) * (C x num_classes).
                out_float = input_float * weight_float';
                
                % Debug trace.
                fprintf('FC debug:\n');
                fprintf('  Input shape: %s\n', mat2str(size(input)));
                fprintf('  Weight shape: %s\n', mat2str(size(weight_float)));
                fprintf('  Input range: [%d, %d]\n', min(input(:)), max(input(:)));
                fprintf('  Dequantized input range: [%f, %f]\n', min(input_float(:)), max(input_float(:)));
                fprintf('  Weight scale: %f\n', weight_scale);
                fprintf('  Weight range: [%f, %f]\n', min(weight_float(:)), max(weight_float(:)));
                fprintf('  Pre-quant range: [%f, %f]\n', min(out_float(:)), max(out_float(:)));
                
                % Requantize onto the output tensor's grid.
                scaled = out_float / output_params.scale;
                fprintf('  Scaled range: [%f, %f]\n', min(scaled(:)), max(scaled(:)));
                
                % Saturate to int8.
                quantized = round(scaled) + output_params.zero_point;
                out = int8(min(max(quantized, -128), 127));
                
                fprintf('  Output shape: %s\n', mat2str(size(out)));
                fprintf('  Final range: [%d, %d]\n', min(out(:)), max(out(:)));
                
            catch ME
                warning('Error in FC: %s', ME.message);
                out = input;
            end
        end
    end
end 