uTensor icon indicating copy to clipboard operation
uTensor copied to clipboard

include header order

Open dboyliao opened this issue 8 years ago • 2 comments

main.cpp:

#include "linreg_ctx.hpp"
#include "tensorIdxImporter.hpp"
#include "uTensor_util.hpp"
#include "test.hpp"
#include <mbed.h>
#include <FATFileSystem.h>
#include <SDBlockDevice.h>


// Test fixture for the quantized linear-regression graph.
// Derives from Test (test.hpp), which supplies the harness methods used
// here and in main (testStart/timer_start/passed/printSummary, presumably).
class LinregTest : public Test {
    Context ctx;               // uTensor context the test graph is built into
    TensorIdxImporter t_import; // loads the reference output tensor from an .idx file
public:
    void runAll(void);         // builds the graph, evaluates it, checks the result
};

// Serial console over the USB virtual COM port at 115200 baud.
Serial pc(USBTX, USBRX, 115200);
// SD-card block device; pin assignments come from the mbed app config
// (MBED_CONF_APP_* macros, presumably defined in mbed_app.json).
SDBlockDevice bd(MBED_CONF_APP_SD_MOSI, MBED_CONF_APP_SD_MISO,
                 MBED_CONF_APP_SD_CLK, MBED_CONF_APP_SD_CS);
// FAT filesystem mounted at "/fs"; the .idx test data is read from here.
FATFileSystem fs("fs");


// Entry point: brings up the SD card and filesystem, runs the
// linear-regression test suite, then tears storage back down.
// ON_ERR (from uTensor_util.hpp, presumably) reports failures of the
// mbed storage calls.
int main(int argc, char* argv[]) {
    ON_ERR(bd.init(), "SDBlockDevice init ");
    ON_ERR(fs.mount(&bd), "Mounting the filesystem on \"/fs\". ");

    LinregTest test;
    test.runAll();
    test.printSummary();  // inherited from Test (declared in test.hpp)

    ON_ERR(fs.unmount(), "fs unmount ");
    ON_ERR(bd.deinit(), "SDBlockDevice de-init ");

    return 0;
}

// Builds the quantized linear-regression graph, evaluates it, and compares
// the computed "yhat:0" tensor against a reference output loaded from the
// SD card. Passes if the mean percent error is below 0.3%.
void LinregTest::runAll(void) {
    testStart("simple quantized matmul test");
    timer_start();
    get_test_quant_linreg_ctx(ctx);
    // Grab a handle to the output tensor before evaluation; eval() then
    // executes all ops pushed into the context.
    S_TENSOR yhat = ctx.get("yhat:0");
    ctx.eval();
    timer_stop();

    // Reference output computed offline and shipped as an .idx file.
    // NOTE(review): float_import returns a raw Tensor* that is never freed
    // here -- confirm whether the importer retains ownership, else this leaks.
    Tensor* ref_yhat = t_import.float_import("/fs/idx_data/output_yhat.idx");

    // compare the results
    double percErr = meanPercentErr<float>(ref_yhat, yhat.get());
    printf("percErr: %f (< 0.003)\n", percErr);
    passed(percErr < 0.003);
}

linreg_ctx.hpp:

// linreg_ctx.hpp -- declares the builder for the quantized linear-regression
// test graph.
//
// Include guard renamed from __LINREG_CTX_H: identifiers containing a double
// underscore are reserved for the implementation ([lex.name]), so user code
// must not define them.
#ifndef LINREG_CTX_H
#define LINREG_CTX_H
#include "context.hpp"

// Populates `ctx` with the tensors and ops of the quantized linear-regression
// graph. The final output tensor is registered under the name "yhat:0".
void get_test_quant_linreg_ctx(Context& ctx);

#endif // LINREG_CTX_H

linreg_ctx.cpp:

// Own header first -- this verifies linreg_ctx.hpp is self-contained, and
// because linreg_ctx.hpp includes context.hpp it also guarantees context.hpp
// is seen before MatrixOps.hpp, without which this TU fails to compile.
#include "linreg_ctx.hpp"
#include "context.hpp"

#include "ArrayOps.hpp"
#include "MathOps.hpp"
#include "MatrixOps.hpp"
#include "tensor.hpp"
#include "tensorIdxImporter.hpp"



// Builds the uTensor graph for a quantized linear regression:
//
//   yhat:0 = Add( Dequantize( Requantize( QuantizedMatMul(X, W) ) ), b )
//
// Constant inputs -- the uint8-quantized X and W, their float [min, max]
// dequantization ranges, and the float bias b -- are imported from .idx
// files under /fs/idx_data (the SD card mounted by main). The caller
// retrieves the result via ctx.get("yhat:0") followed by ctx.eval().
void get_test_quant_linreg_ctx(Context& ctx) {

// ---- constant-tensor imports ----------------------------------------------
// Each import lives in its own scope with a fresh TensorIdxImporter,
// presumably so any importer-held state is released right after ctx.add().

// Quantized input matrix X.
{
    TensorIdxImporter t_import;
    ctx.add(t_import.ubyte_import("/fs/idx_data/X_quint8_const_0.idx"), "X_quint8_const:0", 0);
    
}

// Float range [X_min, X_max] describing how X's uint8 values map to floats.
{
    TensorIdxImporter t_import;
    ctx.add(t_import.float_import("/fs/idx_data/X_min_0.idx"), "X_min:0", 0);
    
}


{
    TensorIdxImporter t_import;
    ctx.add(t_import.float_import("/fs/idx_data/X_max_0.idx"), "X_max:0", 0);
    
}

// Quantized weight matrix W.
{
    TensorIdxImporter t_import;
    ctx.add(t_import.ubyte_import("/fs/idx_data/W_quint8_const_0.idx"), "W_quint8_const:0", 0);
    
}

// Float range [W_min, W_max] for W.
{
    TensorIdxImporter t_import;
    ctx.add(t_import.float_import("/fs/idx_data/W_min_0.idx"), "W_min:0", 0);
    
}


{
    TensorIdxImporter t_import;
    ctx.add(t_import.float_import("/fs/idx_data/W_max_0.idx"), "W_max:0", 0);
    
}

// Float bias vector b.
{
    TensorIdxImporter t_import;
    ctx.add(t_import.float_import("/fs/idx_data/b_0.idx"), "b:0", 0);
    
}

// ---- ops --------------------------------------------------------------
// Output tensors are added to the context before the op that produces them
// is pushed; ops are pushed in execution (topological) order.

// Quantized matmul of X and W: produces a 32-bit-int accumulator tensor
// (:0) plus the float min/max (:1, :2) describing its value range.
{
    ctx.add(new RamTensor<int>(), "MatMul_eightbit_quantized_mat_mul:0");
    ctx.add(new RamTensor<float>({1}), "MatMul_eightbit_quantized_mat_mul:1");
    ctx.add(new RamTensor<float>({1}), "MatMul_eightbit_quantized_mat_mul:2");
    ctx.push(new QntMatMulOp<uint8_t, uint8_t, int>(), 
             { "X_quint8_const:0", "X_min:0", "X_max:0", "W_quint8_const:0", "W_min:0",  "W_max:0" },
             { "MatMul_eightbit_quantized_mat_mul:0", "MatMul_eightbit_quantized_mat_mul:1",  "MatMul_eightbit_quantized_mat_mul:2" });
}

// Computes the tight [min, max] range actually used by the matmul result,
// to be used when narrowing it back down to 8 bits.
{
    ctx.add(new RamTensor<float>({1}), "MatMul_eightbit_requant_range:0");
    ctx.add(new RamTensor<float>({1}), "MatMul_eightbit_requant_range:1");
    ctx.push(new Requantization_RangeOp(),
             { "MatMul_eightbit_quantized_mat_mul:0", "MatMul_eightbit_quantized_mat_mul:1", "MatMul_eightbit_quantized_mat_mul:2" },
             { "MatMul_eightbit_requant_range:0", "MatMul_eightbit_requant_range:1" });
}

// Requantizes the int32 matmul output into uint8 using the computed range.
{
    ctx.add(new RamTensor<uint8_t>(), "MatMul_eightbit_requantize:0");
    ctx.add(new RamTensor<float>({1}), "MatMul_eightbit_requantize:1");
    ctx.add(new RamTensor<float>({1}), "MatMul_eightbit_requantize:2");
    ctx.push(new RequantizeOp(),
             { "MatMul_eightbit_quantized_mat_mul:0", "MatMul_eightbit_quantized_mat_mul:1", "MatMul_eightbit_quantized_mat_mul:2", "MatMul_eightbit_requant_range:0", "MatMul_eightbit_requant_range:1" },
             { "MatMul_eightbit_requantize:0", "MatMul_eightbit_requantize:1", "MatMul_eightbit_requantize:2" });
}

// Converts the uint8 result plus its range back into float values.
{
    ctx.add(new RamTensor<float>(), "MatMul:0");
    ctx.push(new DequantizeOp(), 
             { "MatMul_eightbit_requantize:0", "MatMul_eightbit_requantize:1", "MatMul_eightbit_requantize:2" },
             { "MatMul:0" });
}

// Adds the bias: yhat = X*W + b. "yhat:0" is the graph's final output.
{
    ctx.add(new RamTensor<float>(), "yhat:0");
    ctx.push(new AddOp<float, float>(),
             { "MatMul:0", "b:0" }, 
             { "yhat:0" });
}

}

dboyliao avatar Dec 08 '17 15:12 dboyliao

These files cannot be compiled as-is in uTensor. However, if we change the include order in linreg_ctx.cpp so that linreg_ctx.hpp comes first:

#include "linreg_ctx.hpp"    // this line
#include "MatrixOps.hpp"     // and this line
#include "tensor.hpp"
#include "context.hpp"
#include "tensorIdxImporter.hpp"
#include "ArrayOps.hpp"
#include "MathOps.hpp"
...

Then it compiles.

dboyliao avatar Dec 08 '17 15:12 dboyliao

I found that the key is the relative order of context.hpp and MatrixOps.hpp: compilation succeeds only when context.hpp is included (directly, or transitively via linreg_ctx.hpp) before MatrixOps.hpp. That suggests MatrixOps.hpp depends on declarations from context.hpp but does not include it itself — i.e. the header is not self-contained.

dboyliao avatar Dec 08 '17 15:12 dboyliao