Hello

git clone https://github.com/tencent/ncnn
cd ncnn
git checkout 7b4e77671a4457a414b60cee5425758212e725cf
mkdir build
cd build
cmake -DCMAKE_PREFIX_PATH=/ceph-fj/fangjun/software/protobuf-3.20.1-cmake ..

We have to make the following changes (the SetTotalBytesLimit() change is needed because recent protobuf releases, including 3.20, dropped its two-argument overload):

./code/hello/7b4e77.diff
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 0b710050..e1a5b3d0 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -8,6 +8,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
 protobuf_generate_cpp(CAFFE_PROTO_SRCS CAFFE_PROTO_HDRS caffe.proto)

 add_executable(caffe2ncnn caffe2ncnn.cpp ${CAFFE_PROTO_SRCS} ${CAFFE_PROTO_HDRS})
+include_directories(${Protobuf_INCLUDE_DIR})

 target_link_libraries(caffe2ncnn ${PROTOBUF_LIBRARIES})

diff --git a/tools/caffe2ncnn.cpp b/tools/caffe2ncnn.cpp
index 0eff756a..229cc653 100644
--- a/tools/caffe2ncnn.cpp
+++ b/tools/caffe2ncnn.cpp
@@ -193,7 +193,7 @@ static bool read_proto_from_binary(const char* filepath, google::protobuf::Messa
     google::protobuf::io::IstreamInputStream input(&fs);
     google::protobuf::io::CodedInputStream codedstr(&input);

-    codedstr.SetTotalBytesLimit(INT_MAX, INT_MAX / 2);
+    codedstr.SetTotalBytesLimit(INT_MAX);

     bool success = message->ParseFromCodedStream(&codedstr);

To install the Python package:

cd ncnn
mkdir build
cd build
cmake ..
make -j
cd ..
pip install .

test-net

./code/hello/test-net.cc
#include "datareader.h"
#include "layer/relu.h"
#include "layer_type.h"
#include "net.h"
#include <fstream>
#include <iostream>

#ifndef MY_CHECK
#define MY_CHECK(x, y)                                                         \
  do {                                                                         \
    if (x != y) {                                                              \
      NCNN_LOGE(#x " != " #y);                                                 \
      exit(EXIT_FAILURE);                                                      \
    }                                                                          \
  } while (0)
#endif

// A data reader that pretends every weight blob is all zeros, so that
// load_model() can be called in tests without a real .bin file.
class DataReaderFromEmpty : public ncnn::DataReader {
public:
  virtual int scan(const char *format, void *p) const { return 0; }
  virtual size_t read(void *buf, size_t size) const {
    memset(buf, 0, size);
    return size;
  }
};

ncnn::Layer *MyLayerCreator(void * /*userdata*/) { return new ncnn::ReLU(); }
void MyLayerDestoryer(ncnn::Layer *layer, void * /*userdata*/) { delete layer; }

static void TestCustomLayer() {
  static const char *s = R"(
7767517
2 2
Input        data                            0 1 data
MyLayer      my_layer                        1 1 data out
)";
  std::ofstream of("filename.param");
  of << s;
  of.close();

  ncnn::Net net;
  net.register_custom_layer("MyLayer", MyLayerCreator, MyLayerDestoryer,
                            nullptr);
  // This is the first custom layer, so its index is 0
  MY_CHECK(net.custom_layer_to_index("MyLayer"), 0);

  DataReaderFromEmpty dr;

  // return 0 on success
  // What does load_param() do?
  // If we need to set options, we have to set them before calling load_param()
  // (1) Read the magic number
  // (2) Allocate space for layers and blobs
  auto ret = net.load_param("filename.param");
  MY_CHECK(ret, 0);

  // load_model() is optional here since our custom ReLU layer has no weights to read
  // ret = net.load_model(dr);
  // MY_CHECK(ret, 0);

  ncnn::Mat m(2, 3);
  float *p = m;
  for (int32_t i = 0; i != m.total(); ++i) {
    p[i] = i - float(m.total()) / 2;
  }

  for (int32_t i = 0; i != m.total(); ++i) {
    std::cout << p[i] << ", ";
  }
  std::cout << "\n";

  ncnn::Extractor ex = net.create_extractor();
  ex.input("data", m);
  ncnn::Mat out;
  ex.extract("out", out);
  p = out;
  for (int32_t i = 0; i != out.total(); ++i) {
    std::cout << p[i] << ", ";
  }
  std::cout << "\n";
}

/*
ncnn::Net uses pimpl

std::vector<Layer*> layers;
std::vector<Blob> blobs;
 */
void TestNet() {
  TestCustomLayer();
  NCNN_LOGE("Test net\n");
  /*
  7767517 -> magic number
  3 3 -> layer_count, blob_count

Input            data                             0 1 data
->
layer type: Input
Layer name: data
bottom count: 0
top count: 1
It uses ncnn::create_layer(layer_type) to create a layer

If create_layer() returns a nullptr, it invokes
create_custom_layer(layer_type)

note: create_layer() is a global function, while create_custom_layer()
is a method of `ncnn::Net`.

It will also invoke layer->load_param(paramdict);

Input layer has index 0 since it is the first row
0 1 data -> data is the output blob of Input layer. It has index 0 since
it is the first blob


   */
  static const char *s = R"(
7767517
3 3
Input            data                             0 1 data
Convolution      conv0_fwd                        1 1 data conv0_fwd 0=3 1=3 11=3 2=1 12=1 3=1 13=1 4=0 14=0 5=1 6=81
InnerProduct     dense0_fwd                       1 1 conv0_fwd output 0=1 1=1 2=151875
)";
  std::ofstream of("filename.param");
  of << s;
  of.close();

  ncnn::Net net;
  DataReaderFromEmpty dr;

  // return 0 on success
  // What does load_param() do?
  // If we need to set options, we have to set them before calling load_param()
  // (1) Read the magic number
  // (2) Allocate space for layers and blobs
  auto ret = net.load_param("filename.param");
  MY_CHECK(ret, 0);

  ret = net.load_model(dr);
  MY_CHECK(ret, 0);

  MY_CHECK(net.blobs().size(), 3);
  MY_CHECK(net.layers().size(), 3);

  // test layer 0
  // every layer has a type
  MY_CHECK(net.layers()[0]->type, "Input");
  MY_CHECK(net.layers()[0]->typeindex, ncnn::LayerType::Input);

  // every layer has a name
  MY_CHECK(net.layers()[0]->name, "data");

  // every layer has an input vector
  MY_CHECK(net.layers()[0]->bottoms.size(), 0);

  // every layer has an output vector
  MY_CHECK(net.layers()[0]->tops.size(), 1);
  // indexes in tops are indexes into the global blobs vector
  MY_CHECK(net.layers()[0]->tops[0], 0);

  // every blob has a name
  MY_CHECK(net.blobs()[0].name, "data");

  // every blob has a producer (layer index)
  MY_CHECK(net.blobs()[0].producer, 0); // producer is layer 0

  // every blob has a consumer (layer index)
  MY_CHECK(net.blobs()[0].consumer, 1); // consumer is layer 1
}
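
To make the producer/consumer bookkeeping above easier to see, here is a small helper. It is not part of ./code/hello/test-net.cc; it is only a sketch that should compile against the same headers, walking net.layers() and printing each layer together with the names of its bottom and top blobs.

// Sketch only: dump the graph structure that load_param() recovered.
static void DumpGraph(const ncnn::Net &net) {
  for (size_t i = 0; i != net.layers().size(); ++i) {
    const ncnn::Layer *layer = net.layers()[i];
    std::cout << i << ": " << layer->type << " (" << layer->name << ")\n";
    // bottoms/tops hold indexes into the global blobs vector
    for (int b : layer->bottoms) {
      std::cout << "  bottom: " << net.blobs()[b].name << "\n";
    }
    for (int t : layer->tops) {
      std::cout << "  top:    " << net.blobs()[t].name << "\n";
    }
  }
}

Calling DumpGraph(net) at the end of TestNet() would list Input, Convolution, and InnerProduct together with the data, conv0_fwd, and output blobs that connect them.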

test-param-dict

./code/hello/test-param-dict.cc
#include "paramdict.h"
#ifndef MY_CHECK
#define MY_CHECK(x, y)                                                         \
  do {                                                                         \
    if (x != y) {                                                              \
      NCNN_LOGE(#x " != " #y);                                                 \
      exit(EXIT_FAILURE);                                                      \
    }                                                                          \
  } while (0)
#endif

/*
It has an array of structs.
The size of the array is 32, so the maximum number of parameters is 32
 */
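// A rough sketch of that internal layout (the real definition is hidden behind
// a pimpl in paramdict.cpp, so the field names here are only approximate):
//
//   struct {
//     int type;               // 2 = int, 3 = float, 4 = array (checked below)
//     union { int i; float f; };
//     ncnn::Mat v;            // used for array parameters
//   } params[NCNN_MAX_PARAM_COUNT];  // NCNN_MAX_PARAM_COUNT == 32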
void TestParamDict() {
  NCNN_LOGE("Test param dict\n");
  ncnn::ParamDict pd;

  // an int parameter is stored with type 2
  pd.set(0, 10);
  MY_CHECK(pd.type(0), 2);
  MY_CHECK(pd.get(0, 0), 10);

  // a float parameter is stored with type 3
  pd.set(1, 10.5f);
  MY_CHECK(pd.type(1), 3);
  // Caution: The type of the default value (the second argument) is very
  // important. It determines which overload to invoke
  MY_CHECK(pd.get(1, 0.f), 10.5);

  // an ncnn::Mat (array) parameter is stored with type 4
  ncnn::Mat m(1);
  m[0] = 10.25;
  pd.set(2, m);
  MY_CHECK(pd.type(2), 4);
  MY_CHECK(pd.get(2, ncnn::Mat())[0], 10.25);
}

test-data-reader

./code/hello/test-data-reader.cc
#include "datareader.h"
#include "paramdict.h"
#include <iostream>

#ifndef MY_CHECK
#define MY_CHECK(x, y)                                                         \
  do {                                                                         \
    if (x != y) {                                                              \
      NCNN_LOGE(#x " != " #y);                                                 \
      exit(EXIT_FAILURE);                                                      \
    }                                                                          \
  } while (0)
#endif

// load_param() and load_param_bin() are protected members of ncnn::ParamDict
// (only ncnn::Net is a friend), so this subclass exposes them for testing.
class ParamDict2 : public ncnn::ParamDict {
public:
  int load_param2(const ncnn::DataReader &dr) { return load_param(dr); }

  int load_param_bin2(const ncnn::DataReader &dr) { return load_param_bin(dr); }
};

void TestDataReader() {
  NCNN_LOGE("Test data reader\n");
  int32_t a = 4;
  FILE *fp = fopen("a.bin", "wb");
  fwrite(&a, sizeof(a), 1, fp);

  float f = 1.25;
  fwrite(&f, sizeof(f), 1, fp);

  fclose(fp);

  fp = fopen("a.bin", "rb");

  ncnn::DataReaderFromStdio dr(fp);
  int32_t p;
  float q;
  dr.read(&p, sizeof(p));
  dr.read(&q, sizeof(q));
  MY_CHECK(p, a);
  MY_CHECK(q, f);
  fclose(fp);

  // datareader with param dict
  fp = fopen("a.txt", "w");
  fprintf(fp, "0=100 1=1.250000 -23303=5,0.125,0.25,0.50,0.625,1.0");
  fclose(fp);

  fp = fopen("a.txt", "r");
  ncnn::DataReaderFromStdio dr2(fp);
  ParamDict2 pd;
  pd.load_param2(dr2);
  MY_CHECK(pd.get(0, 0), 100);
  MY_CHECK(pd.get(1, 0.f), 1.25);
  ncnn::Mat m = pd.get(3, ncnn::Mat());
  MY_CHECK(m.w, 5);
  MY_CHECK(m[0], 0.125);
  MY_CHECK(m[1], 0.25);
  MY_CHECK(m[2], 0.50);
  MY_CHECK(m[3], 0.625);
  MY_CHECK(m[4], 1.0);

  fclose(fp);

  // for binary, the stream written below is:
  //
  //     binary 0        (id)
  //     binary 100      (int value)
  //     binary 1        (id)
  //     binary 1.250000 (float value)
  //     binary -23303   (-23300 - 3, i.e. array with id 3)
  //     binary 5        (array length)
  //     binary 0.125
  //     binary 0.25
  //     binary 0.50
  //     binary 0.625
  //     binary 1.0
  //     binary -233     (EOP, end of parameters)
  fp = fopen("a.bin", "wb");
  int32_t id = 0;
  fwrite(&id, sizeof(id), 1, fp);
  int32_t val = 100;
  fwrite(&val, sizeof(val), 1, fp);

  id = 1;
  float val2 = 1.25;
  fwrite(&id, sizeof(id), 1, fp);
  fwrite(&val2, sizeof(val2), 1, fp);

  id = -23303;
  fwrite(&id, sizeof(id), 1, fp);
  int32_t len = m.w;
  fwrite(&len, sizeof(len), 1, fp);
  fwrite(m.data, sizeof(float), m.w, fp);

  int32_t eop = -233;
  fwrite(&eop, sizeof(eop), 1, fp);
  fclose(fp);

  fp = fopen("a.bin", "rb");

  ncnn::DataReaderFromStdio dr3(fp);
  ParamDict2 pd3;
  pd3.load_param_bin2(dr3);
  fclose(fp);
  MY_CHECK(pd3.get(0, 0), 100);

  // ncnn does not know that param 1 is a float. The user has to specify that!
  MY_CHECK(pd3.get(1, 0.f), 1.25);

  ncnn::Mat m2 = pd3.get(3, ncnn::Mat());
  MY_CHECK(m2.w, 5);
  MY_CHECK(m2[0], 0.125);
  MY_CHECK(m2[1], 0.25);
  MY_CHECK(m2[2], 0.50);
  MY_CHECK(m2[3], 0.625);
  MY_CHECK(m2[4], 1.0);
}
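
ncnn also provides ncnn::DataReaderFromMemory in datareader.h, which reads from a buffer instead of a FILE*. The following sketch (assuming it is appended to ./code/hello/test-data-reader.cc so that ParamDict2 and MY_CHECK are in scope) parses the same kind of binary parameter stream without touching the filesystem.

#include <vector>

// Sketch only: parse a binary parameter stream from memory with
// ncnn::DataReaderFromMemory instead of going through a temporary file.
void TestParamBinFromMemory() {
  std::vector<unsigned char> buf;
  auto push_i32 = [&buf](int32_t v) {
    const unsigned char *p = reinterpret_cast<const unsigned char *>(&v);
    buf.insert(buf.end(), p, p + sizeof(v));
  };
  auto push_f32 = [&buf](float v) {
    const unsigned char *p = reinterpret_cast<const unsigned char *>(&v);
    buf.insert(buf.end(), p, p + sizeof(v));
  };

  push_i32(0);     // id 0
  push_i32(100);   // int value
  push_i32(1);     // id 1
  push_f32(1.25f); // float value (stored as raw 4 bytes)
  push_i32(-233);  // end of parameters

  const unsigned char *mem = buf.data();
  ncnn::DataReaderFromMemory dr(mem); // advances `mem` as it reads

  ParamDict2 pd;
  pd.load_param_bin2(dr);
  MY_CHECK(pd.get(0, 0), 100);
  MY_CHECK(pd.get(1, 0.f), 1.25);
}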