Viam C++ SDK (current)
mlmodel.hpp
// Copyright 2023 Viam Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <boost/mpl/joint_view.hpp>
#include <boost/mpl/list.hpp>
#include <boost/mpl/transform_view.hpp>
#include <boost/optional/optional.hpp>
#include <boost/variant/variant.hpp>

#if defined(__has_include) && (__has_include(<xtensor/containers/xadapt.hpp>))
#include <xtensor/containers/xadapt.hpp>
#else
#include <xtensor/xadapt.hpp>
#endif

#include <viam/sdk/common/utils.hpp>
#include <viam/sdk/services/service.hpp>

namespace viam {
namespace sdk {

/// @brief Represents a trained machine learning model instance.
class MLModelService : public Service {
   private:
    template <typename T>
    struct make_tensor_view_ {
        using shape_t = std::vector<std::size_t>;

        using xt_no_ownership_t = decltype(xt::no_ownership());

        using type = decltype(xt::adapt(std::declval<const T*>(),
                                        std::declval<std::size_t>(),
                                        std::declval<xt_no_ownership_t>(),
                                        std::declval<shape_t>()));
    };

   public:
    /// @brief Returns the API associated with a particular resource.
    API api() const override;

    template <typename T>
    using tensor_view = typename make_tensor_view_<T>::type;

    template <typename T>
    static tensor_view<T> make_tensor_view(const T* data,
                                           std::size_t size,
                                           typename tensor_view<T>::shape_type shape) {
        return xt::adapt(std::move(data), std::move(size), xt::no_ownership(), std::move(shape));
    }

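    // Illustrative usage sketch (not part of this header): wrapping a
    // caller-owned buffer with the factory above. The name `pixels` and the
    // shape are assumptions. The view is non-owning (xt::no_ownership), so
    // the buffer must outlive it.
    //
    //   std::vector<float> pixels(1 * 3 * 224 * 224);
    //   auto view = MLModelService::make_tensor_view(
    //       pixels.data(), pixels.size(), {1, 3, 224, 224});
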
    // Now that we have a factory for our tensor view types, use mpl
    // to produce a variant over tensor views over the primitive types
    // we care about, which are the signed and unsigned fixed width
    // integral types and the two floating point types.
    using signed_integral_base_types =
        boost::mpl::list<std::int8_t, std::int16_t, std::int32_t, std::int64_t>;

    using unsigned_integral_base_types =
        boost::mpl::transform_view<signed_integral_base_types,
                                   std::make_unsigned<boost::mpl::placeholders::_1>>;

    using integral_base_types =
        boost::mpl::joint_view<signed_integral_base_types, unsigned_integral_base_types>;

    using fp_base_types = boost::mpl::list<float, double>;

    using base_types = boost::mpl::joint_view<integral_base_types, fp_base_types>;

    using tensor_view_types =
        boost::mpl::transform_view<base_types, make_tensor_view_<boost::mpl::placeholders::_1>>;

    // Union the tensor views for the various base types.
    using tensor_views = boost::make_variant_over<tensor_view_types>::type;

    // Our parameters to and from the model come as named tensor_views.
    using named_tensor_views = std::unordered_map<std::string, tensor_views>;

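    // Sketch of consuming a tensor_views value whose element type is only
    // known at run time (assumed names, not part of this header). Since
    // tensor_views is a boost::variant, a generic visitor works:
    //
    //   struct print_shape : boost::static_visitor<void> {
    //       template <typename View>
    //       void operator()(const View& view) const {
    //           for (const auto dim : view.shape()) {
    //               std::cout << dim << ' ';
    //           }
    //       }
    //   };
    //
    //   boost::apply_visitor(print_shape{}, some_tensor_views_value);
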
    /// @brief Runs the model against the input tensors and returns inference results as tensors.
    inline std::shared_ptr<named_tensor_views> infer(const named_tensor_views& inputs) {
        return infer(inputs, {});
    }

    /// @brief Runs the model against the input tensors and returns inference results as tensors.
    virtual std::shared_ptr<named_tensor_views> infer(const named_tensor_views& inputs,
                                                      const ProtoStruct& extra) = 0;

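    // End-to-end sketch of an inference call (illustrative; `service`, the
    // tensor name "image", and the shape are assumptions, not part of this
    // header):
    //
    //   std::vector<float> image(1 * 224 * 224 * 3);
    //   MLModelService::named_tensor_views inputs;
    //   inputs.emplace("image",
    //                  MLModelService::make_tensor_view(
    //                      image.data(), image.size(), {1, 224, 224, 3}));
    //   const auto outputs = service->infer(inputs);
    //   // `outputs` maps output tensor names to tensor views; consult
    //   // metadata() for the names and element types to expect.
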
    struct tensor_info {
        struct file {
            std::string name;
            std::string description;

            enum : std::uint8_t {
                k_label_type_tensor_value = 0,
                k_label_type_tensor_axis = 1,
            } label_type;
        };

        std::string name;
        std::string description;

        enum class data_types : std::uint8_t {
            k_int8 = 0,
            k_uint8 = 1,
            k_int16 = 2,
            k_uint16 = 3,
            k_int32 = 4,
            k_uint32 = 5,
            k_int64 = 6,
            k_uint64 = 7,
            k_float32 = 8,
            k_float64 = 9,
        } data_type;

        std::vector<int> shape;
        std::vector<file> associated_files;

        ProtoStruct extra;

        static boost::optional<data_types> string_to_data_type(const std::string& str);
        static const char* data_type_to_string(data_types data_type);

        static data_types tensor_views_to_data_type(const tensor_views& view);
    };

    struct metadata {
        std::string name;
        std::string type;
        std::string description;
        std::vector<tensor_info> inputs;
        std::vector<tensor_info> outputs;
    };

    /// @brief Returns metadata describing the inputs and outputs of the model.
    inline struct metadata metadata() {
        return metadata({});
    }

    /// @brief Returns metadata describing the inputs and outputs of the model.
    virtual struct metadata metadata(const ProtoStruct& extra) = 0;

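    // Sketch of inspecting model metadata before building input tensors
    // (illustrative; `service` is an assumed std::shared_ptr<MLModelService>):
    //
    //   const auto md = service->metadata();
    //   for (const auto& input : md.inputs) {
    //       std::cout << input.name << ": "
    //                 << MLModelService::tensor_info::data_type_to_string(input.data_type)
    //                 << '\n';
    //   }
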
   protected:
    explicit MLModelService(std::string name);
};

template <>
struct API::traits<MLModelService> {
    static API api();
};

} // namespace sdk
} // namespace viam