32 changes: 6 additions & 26 deletions app/Graph/build.cpp
@@ -101,7 +101,8 @@ void build_graph_linear(it_lab_ai::Graph& graph, it_lab_ai::Tensor& input,
<< '\n';
}
auto pool_layer =
std::make_shared<it_lab_ai::PoolingLayer>(shape, pooltype);
LayerFactory::createPoolingLayer(pooltype, shape, options);

layers.push_back(pool_layer);
layerpostop.push_back(false);
if (comments) std::cout << "PoolingLayer added to layers." << '\n';
@@ -408,8 +409,8 @@ ParseResult parse_json_model(RuntimeOptions options,
<< '\n';
}
} else if (layer_type == "GlobalAveragePool") {
auto pool_layer = std::make_shared<it_lab_ai::PoolingLayer>(
it_lab_ai::Shape({0, 0}), "average");
auto pool_layer = LayerFactory::createPoolingLayer(
"average", it_lab_ai::Shape({0, 0}), options);
layer = pool_layer;
if (comments) {
std::cout << "GlobalAveragePool layer added (will use input spatial "
@@ -470,30 +471,9 @@ }
}
}

auto pool_layer =
std::make_shared<it_lab_ai::PoolingLayer>(shape, pooltype);

try {
if (strides[0] != 2 || strides[1] != 2) {
pool_layer->setStrides(strides[0], strides[1]);
}

if (pads[0] != 0 || pads[1] != 0 || pads[2] != 0 || pads[3] != 0) {
pool_layer->setPads(pads[0], pads[1], pads[2], pads[3]);
}
auto pool_layer = LayerFactory::createPoolingLayer(
pooltype, shape, options, strides, pads, dilations, ceil_mode);

if (dilations[0] != 1 || dilations[1] != 1) {
pool_layer->setDilations(dilations[0], dilations[1]);
}

pool_layer->setCeilMode(ceil_mode);

} catch (const std::exception& e) {
if (comments) {
std::cout << "Warning: Some pooling parameters not supported: "
<< e.what() << '\n';
}
}
layer = pool_layer;
} else if (layer_type.find("Flatten") != std::string::npos) {
int axis = 1;
14 changes: 14 additions & 0 deletions app/Graph/build.hpp
@@ -34,6 +34,7 @@
#include "layers/TransposeLayer.hpp"
#include "layers_oneDNN/ConvLayer.hpp"
#include "layers_oneDNN/EWLayer.hpp"
#include "layers_oneDNN/PoolingLayer.hpp"

extern std::unordered_map<std::string, std::string> model_paths;

@@ -99,6 +100,19 @@ class LayerFactory {
return std::make_shared<ConvolutionalLayer>(step, pads, dilations, kernel,
bias, group, useLegacyImpl);
}

static std::shared_ptr<Layer> createPoolingLayer(
const std::string& PoolType, const Shape& shape,
const RuntimeOptions& options, const Shape& strides = {2, 2},
const Shape& pads = {0, 0, 0, 0}, const Shape& dilations = {1, 1},
bool ceil_mode = false) {
if (options.backend == Backend::kOneDnn) {
return std::make_shared<PoolingLayerOneDnn>(
shape, strides, pads, dilations, ceil_mode, PoolType);
}
return std::make_shared<PoolingLayer>(shape, strides, pads, dilations,
ceil_mode, PoolType);
}
};

} // namespace it_lab_ai
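A minimal usage sketch of the new LayerFactory::createPoolingLayer, assuming RuntimeOptions and Backend are visible from the it_lab_ai namespace as the header above implies; the option setup is illustrative, only the call signature comes from this change:

    RuntimeOptions options;
    options.backend = Backend::kOneDnn;  // any other backend falls back to the reference PoolingLayer
    auto pool = LayerFactory::createPoolingLayer(
        "max", Shape({2, 2}), options,   // pooling type and window shape
        Shape({2, 2}),                   // strides
        Shape({0, 0, 0, 0}),             // pads: top, bottom, left, right
        Shape({1, 1}),                   // dilations
        /*ceil_mode=*/false);            // returns std::shared_ptr<Layer> holding a PoolingLayerOneDnn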
11 changes: 11 additions & 0 deletions include/layers/Shape.hpp
@@ -5,6 +5,7 @@
#include <iostream>
#include <numeric>
#include <ostream>
#include <sstream>
#include <stdexcept>
#include <vector>

@@ -39,6 +40,16 @@ class Shape {
}
[[nodiscard]] size_t dims() const noexcept { return dims_.size(); }
[[nodiscard]] size_t get_index(const std::vector<size_t>& coords) const;
[[nodiscard]] std::string to_string() const {
std::stringstream ss;
ss << "(";
for (size_t i = 0; i < dims_.size(); ++i) {
if (i > 0) ss << ", ";
ss << dims_[i];
}
ss << ")";
return ss.str();
}
bool operator==(const Shape& other) const {
if (dims_.size() != other.dims_.size()) return false;
for (size_t i = 0; i < dims_.size(); ++i) {
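For reference, the new Shape::to_string prints the dimensions as a parenthesized, comma-separated list; a quick illustrative example:

    Shape s({1, 3, 224, 224});
    std::cout << s.to_string() << '\n';  // prints: (1, 3, 224, 224)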
87 changes: 87 additions & 0 deletions include/layers_oneDNN/PoolingLayer.hpp
@@ -0,0 +1,87 @@
#pragma once

#include <dnnl.hpp>
#include <memory>
#include <string>
#include <vector>

#include "layers/Layer.hpp"

namespace it_lab_ai {

class PoolingLayerOneDnn : public Layer {
public:
explicit PoolingLayerOneDnn(const Shape& pooling_shape,
const Shape& strides = {2, 2},
const Shape& pads = {0, 0, 0, 0},
const Shape& dilations = {1, 1},
bool ceil_mode = false,
std::string pooling_type = "average")
: Layer(kPooling),
poolingShape_(pooling_shape),
strides_(strides),
pads_(pads),
dilations_(dilations),
ceil_mode_(ceil_mode),
poolingType_(std::move(pooling_type)),
engine_(std::make_unique<dnnl::engine>(dnnl::engine::kind::cpu, 0)),
stream_(std::make_unique<dnnl::stream>(*engine_)) {}

void run(const std::vector<Tensor>& input,
std::vector<Tensor>& output) override;

void setStrides(size_t h, size_t w) {
strides_ = {h, w};
initialized_ = false;
}

void setPads(size_t top, size_t bottom, size_t left, size_t right) {
pads_ = {top, bottom, left, right};
initialized_ = false;
}

void setDilations(size_t h, size_t w) {
dilations_ = {h, w};
initialized_ = false;
}

void setCeilMode(bool ceil_mode) {
ceil_mode_ = ceil_mode;
initialized_ = false;
}

#ifdef ENABLE_STATISTIC_WEIGHTS
Tensor get_weights() override {
std::vector<int> v = {0};
Tensor a = make_tensor(v);
return a;
}
#endif

private:
void initialize_onednn(const Shape& shape, Type data_type);
[[nodiscard]] dnnl::algorithm get_PoolType() const;
static void validate_input(const std::vector<Tensor>& input);
[[nodiscard]] static dnnl::memory::data_type get_dnnl_data_type(Type type);
[[nodiscard]] Shape calculate_output_shape(const Shape& input_shape) const;

Shape poolingShape_;
Shape strides_;
Shape pads_;
Shape dilations_;
bool ceil_mode_;
std::string poolingType_;

bool initialized_ = false;
Shape last_shape_;
Type last_type_;

std::unique_ptr<dnnl::engine> engine_;
std::unique_ptr<dnnl::stream> stream_;
std::unique_ptr<dnnl::pooling_forward> pool_prim_;
dnnl::memory::desc src_memory_desc_;
dnnl::memory::desc dst_memory_desc_;
Shape output_shape_;
};

} // namespace it_lab_ai
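A short sketch of exercising PoolingLayerOneDnn on its own, assuming an NCHW float input and the make_tensor helper used elsewhere in this diff; the pre-sized output vector and the input layout are assumptions, not something this header guarantees:

    std::vector<float> data(1 * 1 * 4 * 4, 1.0F);            // one 4x4 feature map
    Tensor in = make_tensor(data, Shape({1, 1, 4, 4}));
    PoolingLayerOneDnn pool(Shape({2, 2}), Shape({2, 2}),    // 2x2 window, stride 2
                            Shape({0, 0, 0, 0}),             // no padding
                            Shape({1, 1}), false, "max");    // dilations 1x1, floor mode, max pooling
    std::vector<Tensor> inputs = {in};
    std::vector<Tensor> outputs(1);                          // assumes run() fills a pre-sized slot
    pool.run(inputs, outputs);                               // outputs[0]: 1x1x2x2 of pooled values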
49 changes: 33 additions & 16 deletions src/layers_oneDNN/ConvLayer.cpp
@@ -369,8 +369,9 @@ void ConvLayerOneDnn::initialize_special_conv(const Shape& input_shape,
if (has_bias) {
bias_md = dnnl::memory::desc(
{static_cast<dnnl::memory::dim>(bias_.get_shape()[0])}, dt,
dnnl::memory::format_tag::any);
dnnl::memory::format_tag::a);
}

dnnl::convolution_forward::primitive_desc conv_pd =
has_bias ? dnnl::convolution_forward::primitive_desc(
*engine_, dnnl::prop_kind::forward_inference,
@@ -391,20 +392,21 @@

if (data_type == Type::kFloat) {
const std::vector<float>& kernel_data = *kernel_.as<float>();

size_t kh = k_shape[0];
size_t kw = k_shape[1];
size_t kic = k_shape[2];
size_t koc = k_shape[3];

std::vector<float> reordered(koc * kic * kh * kw);
size_t idx = 0;

for (size_t oc = 0; oc < koc; oc++) {
for (size_t ic = 0; ic < kic; ic++) {
for (size_t h = 0; h < kh; h++) {
for (size_t w = 0; w < kw; w++) {
for (size_t oc = 0; oc < koc; ++oc) {
for (size_t ic = 0; ic < kic; ++ic) {
for (size_t h = 0; h < kh; ++h) {
for (size_t w = 0; w < kw; ++w) {
size_t src_idx = ((h * kw + w) * kic + ic) * koc + oc;
reordered[idx++] = kernel_data[src_idx];
size_t dst_idx = ((oc * kic + ic) * kh + h) * kw + w;
reordered[dst_idx] = kernel_data[src_idx];
}
}
}
@@ -421,14 +423,13 @@ void ConvLayerOneDnn::initialize_special_conv(const Shape& input_shape,
size_t koc = k_shape[3];

std::vector<float> reordered(koc * kic * kh * kw);
size_t idx = 0;

for (size_t oc = 0; oc < koc; oc++) {
for (size_t ic = 0; ic < kic; ic++) {
for (size_t h = 0; h < kh; h++) {
for (size_t w = 0; w < kw; w++) {
for (size_t oc = 0; oc < koc; ++oc) {
for (size_t ic = 0; ic < kic; ++ic) {
for (size_t h = 0; h < kh; ++h) {
for (size_t w = 0; w < kw; ++w) {
size_t src_idx = ((h * kw + w) * kic + ic) * koc + oc;
reordered[idx++] = static_cast<float>(kernel_data_int[src_idx]);
size_t dst_idx = ((oc * kic + ic) * kh + h) * kw + w;
reordered[dst_idx] = static_cast<float>(kernel_data_int[src_idx]);
}
}
}
@@ -438,6 +439,22 @@ void ConvLayerOneDnn::initialize_special_conv(const Shape& input_shape,
reordered.size() * sizeof(float));
}

if (has_bias) {
if (data_type == Type::kFloat) {
const std::vector<float>& bias_data = *bias_.as<float>();
std::memcpy(bias_memory_.get_data_handle(), bias_data.data(),
bias_data.size() * sizeof(float));
} else if (data_type == Type::kInt) {
const std::vector<int>& bias_data_int = *bias_.as<int>();
std::vector<float> float_bias(bias_data_int.size());
std::transform(bias_data_int.begin(), bias_data_int.end(),
float_bias.begin(),
[](int val) { return static_cast<float>(val); });
std::memcpy(bias_memory_.get_data_handle(), float_bias.data(),
float_bias.size() * sizeof(float));
}
}

conv_prim_ = std::make_unique<dnnl::convolution_forward>(conv_pd);
initialized_ = true;

@@ -494,8 +511,8 @@ void ConvLayerOneDnn::run_special_conv(const std::vector<Tensor>& input,
Shape output_shape = get_output_shape(input_shape);

if (data_type == Type::kFloat) {
std::vector<float> output_data(dst_memory_.get_desc().get_size() /
sizeof(float));
size_t output_size = dst_memory_.get_desc().get_size() / sizeof(float);
std::vector<float> output_data(output_size);
std::memcpy(output_data.data(), dst_memory_.get_data_handle(),
output_data.size() * sizeof(float));
output[0] = make_tensor(output_data, output_shape);
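On the kernel reordering above: the previous code wrote into reordered with a running idx++ counter, while the updated loops compute the destination offset explicitly, so the HWIO-to-OIHW conversion no longer depends on the exact loop nesting order. An illustrative restatement of the two index formulas (not part of the diff; the kernel extents are made up):

    // Source kernel laid out as [kh][kw][kic][koc] (HWIO),
    // destination buffer laid out as [koc][kic][kh][kw] (OIHW).
    size_t kh = 3, kw = 3, kic = 2, koc = 4;
    for (size_t oc = 0; oc < koc; ++oc)
      for (size_t ic = 0; ic < kic; ++ic)
        for (size_t h = 0; h < kh; ++h)
          for (size_t w = 0; w < kw; ++w) {
            size_t src_idx = ((h * kw + w) * kic + ic) * koc + oc;  // offset in HWIO source
            size_t dst_idx = ((oc * kic + ic) * kh + h) * kw + w;   // offset in OIHW destination
            // reordered[dst_idx] = kernel_data[src_idx];
          }

Under the loop order shown, dst_idx coincides with the old running counter, so the numeric result should be unchanged; the explicit formula just makes the target layout visible.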