Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit d35c677

Browse files
committed
merged with develop
2 parents 21499a0 + ceba941 commit d35c677

File tree

8 files changed

+205
-3
lines changed

8 files changed

+205
-3
lines changed

.circleci/config.yml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,9 +33,11 @@ jobs:
3333
name: Generate code coverage
3434
command: |
3535
cd build
36+
rm -rf TESTS/extern
3637
lcov -c --directory TESTS --output-file main_coverage.info > /tmp/test_results/coverage_summary.txt
3738
genhtml main_coverage.info --output-directory /tmp/coverage
38-
coveralls -b TESTS -r TESTS --gcov-options '\-lp'
39+
# TODO Fix coveralls
40+
#coveralls -b TESTS -r TESTS --gcov-options '\-lp'
3941
- store_test_results:
4042
path: /tmp/test_results
4143
- store_artifacts:

.mbedignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,5 @@ extern/*
33
docs/*
44
proposals/*
55
python/*
6-
tutorials/*
6+
tutorials/*
7+
tanh_model/*

TESTS/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ package_add_test_with_libraries(test_sq_conv2d operators/test_sq_conv2d utensor
5959
package_add_test_with_libraries(test_sq_softmax operators/test_sq_softmax utensor operators "${PROJECT_DIR}/test-data/")
6060
package_add_test_with_libraries(test_sq_logistic operators/test_sq_logistic utensor operators "${PROJECT_DIR}/test-data/")
6161
package_add_test_with_libraries(test_sq_tanh operators/test_sq_tanh.cpp utensor operators "${PROJECT_DIR}/test-data/")
62+
package_add_test_with_libraries(test_transpose operators/test_transpose utensor operators "${PROJECT_DIR}/test-data/")
6263

6364
# Includes
6465
package_add_test_with_libraries(test_top_include library/test_top_include utensor library "${PROJECT_DIR}/test-data/")
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
#ifndef TRANSPOSE_TEST_H
#define TRANSPOSE_TEST_H

#include <cstdint>  // int32_t — previously relied on a transitive include

// Test fixtures for the Transpose operator test.
//
// NOTE(review): the original include guard was `_TRANSPOSE_TEST_H`; a leading
// underscore followed by an uppercase letter is reserved for the
// implementation in C++, so the guard is renamed here.

// Axis permutation fed to the operator: swaps axes 0 and 2, leaves 1 and 3.
// Applied to an input of shape {3, 1, 5, 1} this yields {5, 1, 3, 1}.
static const int32_t transpose_perm_arr[4] = {2, 1, 0, 3};
// 15 input values; the test interprets them as a {3, 1, 5, 1} tensor
// (effectively a 3x5 row-major matrix).
static const float random_input_arr[15] = {
    3.484638214111328, 2.033799886703491, 3.2437448501586914,
    4.783249855041504, 3.497023582458496, 3.511240005493164,
    1.558927297592163, 3.7084484100341797, 2.570117712020874,
    0.2405869960784912, 1.8713605403900146, 4.19132661819458,
    0.6596618890762329, 0.9029078483581543, 0.2223271131515503};
// Expected output: random_input_arr viewed as a 3x5 matrix, transposed to
// 5x3 (i.e. ref_output_arr[r * 3 + c] == random_input_arr[c * 5 + r]).
static const float ref_output_arr[15] = {
    3.484638214111328, 3.511240005493164, 1.8713605403900146,
    2.033799886703491, 1.558927297592163, 4.19132661819458,
    3.2437448501586914, 3.7084484100341797, 0.6596618890762329,
    4.783249855041504, 2.570117712020874, 0.9029078483581543,
    3.497023582458496, 0.2405869960784912, 0.2223271131515503};

#endif  // TRANSPOSE_TEST_H

TESTS/operators/test_transpose.cpp

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
#include <cstring>
2+
#include <iostream>
3+
4+
#include "RamTensor.hpp"
5+
#include "RomTensor.hpp"
6+
#include "Transpose.hpp"
7+
#include "arenaAllocator.hpp"
8+
#include "constants_transpose.hpp"
9+
#include "context.hpp"
10+
#include "gtest/gtest.h"
11+
12+
using std::cout;
13+
using std::endl;
14+
15+
using namespace uTensor;
16+
using namespace uTensor::ReferenceOperators;
17+
// End-to-end check of ReferenceOperators::TransposeOperator<float>:
// a {3, 1, 5, 1} input permuted with perm = {2, 1, 0, 3} must produce a
// {5, 1, 3, 1} output whose values match the precomputed reference array.
TEST(Transpose, transpose_test) {
  // Arena allocators backing tensor metadata and RAM tensor payloads.
  localCircularArenaAllocator<1024> meta_allocator;
  // Sized for the 15-float output with 2x headroom (15 * 2 * sizeof(float)).
  localCircularArenaAllocator<15 * 2 * sizeof(float), uint32_t> ram_allocator;
  Context::get_default_context()->set_metadata_allocator(&meta_allocator);
  Context::get_default_context()->set_ram_data_allocator(&ram_allocator);

  // Both operator inputs live in ROM: the data and the axis permutation.
  Tensor input_tensor = new RomTensor({3, 1, 5, 1}, flt, random_input_arr);
  Tensor perm_tensor = new RomTensor({4}, i32, transpose_perm_arr);

  // Sanity-check the constructed input shape before running the operator.
  TensorShape input_target_shape(3, 1, 5, 1);
  TensorShape input_shape = input_tensor->get_shape();
  EXPECT_TRUE(input_target_shape == input_shape);

  // Output starts shapeless; the operator resizes it during eval().
  Tensor output_tensor = new RamTensor(flt);
  TransposeOperator<float> op;

  op.set_inputs({{TransposeOperator<float>::input, input_tensor},
                 {TransposeOperator<float>::perm, perm_tensor}})
      .set_outputs({{TransposeOperator<float>::output, output_tensor}})
      .eval();

  // Element-wise comparison against the reference transpose.
  for (int i = 0; i < 15; ++i) {
    EXPECT_NEAR((float)output_tensor(i), ref_output_arr[i], 0.0001);
  }
  // perm {2, 1, 0, 3} applied to {3, 1, 5, 1} must yield {5, 1, 3, 1}.
  TensorShape target_shape(5, 1, 3, 1);
  TensorShape output_shape = output_tensor->get_shape();
  EXPECT_TRUE(target_shape == output_shape);
}

src/uTensor/core/types.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ TensorStrides::TensorStrides(TensorShape& shape) {
153153
}
154154
_strides[last_idx] = 1;
155155
uint32_t s = 1;
156-
for (size_t i = last_idx - 1; i >= 0; --i) {
156+
for (int32_t i = last_idx - 1; i >= 0; --i) {
157157
s *= shape[i + 1];
158158
_strides[i] = s;
159159
}

src/uTensor/ops/Convolution.hpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -291,6 +291,9 @@ using MaxPoolOperator = GenericPoolOperator<T, MaxFilter<T>>;
291291
template <typename T>
292292
using AvgPoolOperator = GenericPoolOperator<T, AvgFilter<T>>;
293293

294+
template<typename T>
295+
using MinPoolOperator = GenericPoolOperator<T, MinFilter<T>>;
296+
294297
}
295298
} // namespace uTensor
296299
#endif

src/uTensor/ops/Transpose.hpp

Lines changed: 133 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,133 @@
1+
#ifndef UTENSOR_TRANSPOSE_H
#define UTENSOR_TRANSPOSE_H

#include <cstring>

#include "context.hpp"
#include "operatorBase.hpp"
#include "tensor.hpp"
#include "types.hpp"
#include "uTensor_util.hpp"

namespace uTensor {
namespace ReferenceOperators {

// Transpose (Swap Axes) as a port from Numpy,
// using stride iteration in the order of the transpose axes.
template <typename Tin>
class TransposeOperator : public OperatorInterface<2, 1> {
  /* Permutes the axes of `input` according to `perm` and writes the result
   * to `output`, resizing the output tensor to the permuted shape. */
 public:
  enum names_in : uint8_t { input, perm };
  enum names_out : uint8_t { output };

  // Validates the perm tensor (1-D, int32), computes the output shape and
  // strides from the permutation, then copies elements one at a time using
  // a multi-dimensional offset counter instead of nested loops.
  virtual void compute() {
    const Tensor& perm_tensor = inputs[perm].tensor();
    // perm must be a vector: one axis index per input dimension.
    if (perm_tensor.get_shape().num_dims() > 1) {
      uTensor_printf(
          "the input tensor perm should be a vector (dimension should be 1)\n");
      Context::get_default_context()->throwError(new InvalidTensorInputError);
    }
    if (perm_tensor->get_type() != i32) {
      uTensor_printf("expecting perm tensor of element type int32_t\n");
      Context::get_default_context()->throwError(
          new InvalidTensorDataTypeError);
    }
    Tensor& input_tensor = inputs[input].tensor();
    TensorShape& input_shape = input_tensor.get_shape();
    input_shape.update_dims();

    // Strides are used to iterate over the dataset, and transfer
    // the input tensor data into the output tensor.
    TensorStrides input_strides = TensorStrides(input_shape);

    Tensor& output_tensor = outputs[output].tensor();

    // Create a placeholder to calculate the output shape.
    // Normally this would reference output shape, but since this could
    // (usually would) be referencing the input, keep a dedicated value.
    TensorShape output_shape = TensorShape(1, 1, 1, 1);
    TensorStrides output_strides = TensorStrides(output_shape);
    // NOTE(review): this constructs a 1-D TensorShape whose extent is
    // num_dims(); indices 1..3 are then written below, which presumably
    // stays within TensorShape's fixed 4-slot storage — TODO confirm.
    TensorShape offsets = TensorShape(input_shape.num_dims());

    for (size_t i = 0; i < 4; ++i) {
      output_shape[i] = 0;
      output_strides[i] = 0;

      // Offsets are used to avoid multiple for loops.
      offsets[i] = 0;
    }

    // Permute: axis i of the input becomes axis perm(i) of the output,
    // carrying both its extent and its (input) stride.
    for (size_t i = 0; i < (size_t)input_shape.num_dims(); ++i) {
      int32_t axis = static_cast<int32_t>(perm_tensor(i));
      output_shape[axis] = input_shape[i];

      // output_strides(i) is derived from axes and input_strides.
      output_strides[axis] = input_strides[i];
    }

    // Output shape can be asserted once the transform
    // effect has been determined.
    output_shape.update_dims();
    output_tensor->resize(output_shape);

    // Perform some basic checks.
    // (Messages previously said "reshape" — copy-paste from the Reshape op.)
    if (input_tensor->num_elems() != output_tensor->num_elems()) {
      uTensor_printf("inconsistent input and output shape for transpose\n");
      Context::get_default_context()->throwError(new InvalidReshapeError);
      return;
    }
    if (input_tensor->get_type() != output_tensor->get_type()) {
      uTensor_printf("inconsistent input and output data type for transpose\n");
      Context::get_default_context()->throwError(
          new InvalidTensorDataTypeError);
      return;
    }
    if (!_check_input_shape()) {
      Context::get_default_context()->throwError(
          new InvalidTensorDataTypeError);
      return;
    }

    // Copy data: output index i is written sequentially while the source
    // index is reconstructed from the permuted strides.
    for (uint32_t i = 0; i < input_tensor->num_elems(); ++i) {
      // Index of the source value, must be calculated
      // using the output strides and output shape.
      uint32_t idx = 0;
      for (uint32_t j = 0; j < output_shape.num_dims(); j++) {
        idx += offsets[j] * output_strides[j];
      }

      // this is not a copy: `output_tensor(i) = input_tensor(i);`
      output_tensor(i) = static_cast<Tin>(input_tensor(idx));

      // Update offsets, to iterate sequentially along strides
      // in the order of axes (odometer-style, last axis fastest).
      for (int32_t j = output_shape.num_dims() - 1; j >= 0; j--) {
        offsets[j] = (offsets[j] + 1) % (output_shape[j]);
        if (offsets[j] > 0) {
          break;
        }
      }
    }
  }

 private:
  // Returns false (and logs) if any input dimension is negative.
  // NOTE(review): if TensorShape extents are unsigned this check can never
  // fire, and InvalidTensorDataTypeError is an odd error type for a shape
  // problem — both kept as-is to preserve caller-visible behavior; confirm.
  bool _check_input_shape() {
    const Tensor& input_tensor = inputs[input].tensor();
    const TensorShape& shape = input_tensor->get_shape();
    uint8_t num_dims = shape.num_dims();
    for (int i = 0; i < num_dims; ++i) {
      if (shape[i] < 0) {
        uTensor_printf("the input shape must be all positive\n");
        return false;
      }
    }
    return true;
  }
};

}  // namespace ReferenceOperators
}  // namespace uTensor

#endif  // UTENSOR_TRANSPOSE_H

0 commit comments

Comments
 (0)