Load in C++
See https://pytorch.org/tutorials/advanced/cpp_export.html.
Load the foo.pt saved in the "Export and ignore methods" section
in C++.
./code/load-in-cpp/Makefile
# Build ./main, a C++ program that loads the TorchScript model ../foo.pt.
# The C++ ABI flag must match the one libtorch was compiled with, or the
# link will fail with undefined std::string-related symbols.
USE_CXX11_ABI := $(shell python3 -c 'import torch; print(int(torch.compiled_with_cxx11_abi()))')
TORCH_INSTALL_DIR := $(shell python3 -c 'import os; import torch; print(os.path.dirname(torch.__file__))')

$(info USE_CXX11_ABI $(USE_CXX11_ABI))
$(info TORCH_INSTALL_DIR $(TORCH_INSTALL_DIR))

CXXFLAGS := -I$(TORCH_INSTALL_DIR)/include
CXXFLAGS += -I$(TORCH_INSTALL_DIR)/include/torch/csrc/api/include
CXXFLAGS += -I$(TORCH_INSTALL_DIR)/include/TH
CXXFLAGS += -I$(TORCH_INSTALL_DIR)/include/THC
CXXFLAGS += -std=c++14
CXXFLAGS += -D_GLIBCXX_USE_CXX11_ABI=$(USE_CXX11_ABI)

CXXFLAGS += -Wno-unknown-pragmas # disable omp warnings

LDFLAGS := -L$(TORCH_INSTALL_DIR)/lib
LDFLAGS += -lc10 -ltorch -ltorch_cpu
# Embed the torch lib dir in the binary so ./main finds libtorch at run
# time without needing LD_LIBRARY_PATH.
LDFLAGS += -Wl,-rpath,$(TORCH_INSTALL_DIR)/lib

# Link the CUDA libraries only when the installed torch can actually use a
# GPU on this machine.
HAS_CUDA := $(shell python3 -c 'import torch; print("yes" if torch.cuda.is_available() else "no")')
$(info has cuda $(HAS_CUDA))

ifeq ($(HAS_CUDA),yes)
# Derive CUDA_HOME from the nvcc on PATH: .../bin/nvcc -> .../
CUDA_HOME := $(shell which nvcc | xargs dirname | xargs dirname)
CXXFLAGS += -I$(CUDA_HOME)/include
LDFLAGS += -L$(CUDA_HOME)/lib64
LDFLAGS += -lcudart -lc10_cuda -ltorch_cuda
LDFLAGS += -Wl,-rpath,$(CUDA_HOME)/lib64
endif

# Remove a half-written target if its recipe fails, so a broken main.o is
# never mistaken for an up-to-date one.
.DELETE_ON_ERROR:

.PHONY: clean

main: main.o
	$(CXX) -o $@ $< $(LDFLAGS)

main.o: main.cc
	$(CXX) $(CXXFLAGS) -c -o $@ $<

clean:
	$(RM) main.o main
Note
torch::jit::script::Module
is deprecated, use torch::jit::Module
instead.
./code/load-in-cpp/main.cc
1#include "torch/script.h"
2
3int main() {
4 // see torch/csrc/jit/module.h
5 torch::jit::Module m = torch::jit::load("../foo.pt");
6 std::cout << "is training: " << m.is_training() << "\n";
7 m.eval();
8 std::cout << "after m.eval(): is training: " << m.is_training() << "\n";
9 torch::Tensor x = torch::tensor({1, 2, 3}, torch::kFloat);
10 torch::Tensor y = m.run_method("baz", x).toTensor();
11 std::cout << y << "\n";
12
13 return 0;
14}
The output of make
is:
USE_CXX11_ABI 0
TORCH_INSTALL_DIR /ceph-fj/fangjun/software/py38/lib/python3.8/site-packages/torch
has cuda yes
g++ -I/ceph-fj/fangjun/software/py38/lib/python3.8/site-packages/torch/include \
-I/ceph-fj/fangjun/software/py38/lib/python3.8/site-packages/torch/include/torch/csrc/api/include \
-I/ceph-fj/fangjun/software/py38/lib/python3.8/site-packages/torch/include/TH \
-I/ceph-fj/fangjun/software/py38/lib/python3.8/site-packages/torch/include/THC \
-std=c++14 \
-D_GLIBCXX_USE_CXX11_ABI=0 \
-Wno-unknown-pragmas \
-I/ceph-sh1/fangjun/software/cuda-10.2.89/include \
-c -o main.o main.cc
g++ -o main main.o \
-L/ceph-fj/fangjun/software/py38/lib/python3.8/site-packages/torch/lib \
-lc10 -ltorch -ltorch_cpu \
-Wl,-rpath,/ceph-fj/fangjun/software/py38/lib/python3.8/site-packages/torch/lib \
-L/ceph-sh1/fangjun/software/cuda-10.2.89/lib64 \
-lcudart -lc10_cuda -ltorch_cuda \
-Wl,-rpath,/ceph-sh1/fangjun/software/cuda-10.2.89/lib64
The output of ./main
is:
is training: 1
after m.eval(): is training: 0
5
6
7
[ CPUFloatType{3} ]