Commit 1c94c38

IDF release/v4.4 a79dc75f0a (espressif#6048)

1 parent: 7cf1623

82 files changed (+214 −191 lines)

platform.txt (+3 −3)

Large diffs are not rendered by default.

tools/platformio-build-esp32.py (+1 −1)

@@ -303,7 +303,7 @@
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
+    ("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
     "ESP_PLATFORM",
     "_POSIX_READER_WRITER_LOCKS",
     "ARDUINO_ARCH_ESP32",

tools/platformio-build-esp32c3.py (+1 −1)

@@ -293,7 +293,7 @@
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
+    ("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
     "ESP_PLATFORM",
     "_POSIX_READER_WRITER_LOCKS",
     "ARDUINO_ARCH_ESP32",

tools/platformio-build-esp32s2.py (+1 −1)

@@ -290,7 +290,7 @@
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
+    ("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
     "ESP_PLATFORM",
     "_POSIX_READER_WRITER_LOCKS",
     "ARDUINO_ARCH_ESP32",

tools/sdk/esp32/include/config/sdkconfig.h (+4 −2)

@@ -406,7 +406,9 @@
 #define CONFIG_LWIP_IPV6_ND6_NUM_NEIGHBORS 5
 #define CONFIG_LWIP_ICMP 1
 #define CONFIG_LWIP_MAX_RAW_PCBS 16
-#define CONFIG_LWIP_SNTP_MAX_SERVERS 1
+#define CONFIG_LWIP_SNTP_MAX_SERVERS 3
+#define CONFIG_LWIP_DHCP_GET_NTP_SRV 1
+#define CONFIG_LWIP_DHCP_MAX_NTP_SERVERS 1
 #define CONFIG_LWIP_SNTP_UPDATE_DELAY 3600000
 #define CONFIG_LWIP_ESP_LWIP_ASSERT 1
 #define CONFIG_LWIP_HOOK_TCP_ISN_DEFAULT 1
@@ -677,5 +679,5 @@
 #define CONFIG_ULP_COPROC_RESERVE_MEM CONFIG_ESP32_ULP_COPROC_RESERVE_MEM
 #define CONFIG_WARN_WRITE_STRINGS CONFIG_COMPILER_WARN_WRITE_STRINGS
 #define CONFIG_WIFI_LWIP_ALLOCATION_FROM_SPIRAM_FIRST CONFIG_SPIRAM_TRY_ALLOCATE_WIFI_LWIP
-#define CONFIG_ARDUINO_IDF_COMMIT "f23dcd3555"
+#define CONFIG_ARDUINO_IDF_COMMIT "a79dc75f0a"
 #define CONFIG_ARDUINO_IDF_BRANCH "release/v4.4"
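The lwIP change raises CONFIG_LWIP_SNTP_MAX_SERVERS from 1 to 3 and enables learning an NTP server over DHCP (CONFIG_LWIP_DHCP_GET_NTP_SRV), so Arduino's configTime() can now make real use of its second and third server arguments as fallbacks. A minimal sketch, assuming placeholder Wi-Fi credentials and public NTP pools:

#include <Arduino.h>
#include <WiFi.h>

void setup()
{
    Serial.begin(115200);
    WiFi.begin("your-ssid", "your-password"); // placeholders
    while (WiFi.status() != WL_CONNECTED)
    {
        delay(100);
    }

    // Up to three SNTP servers can now be registered; the extras act as
    // fallbacks if the first is unreachable.
    configTime(0 /* gmtOffset_sec */, 0 /* daylightOffset_sec */,
               "pool.ntp.org", "time.nist.gov", "time.google.com");

    struct tm timeinfo;
    if (getLocalTime(&timeinfo))
    {
        Serial.println(&timeinfo, "%Y-%m-%d %H:%M:%S");
    }
}

void loop() {}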

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_expand_dims.hpp (+2 −3)

@@ -66,19 +66,18 @@ namespace dl
             this->output_exponent = input.exponent;
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
                 this->output->set_exponent(this->output_exponent);
-                this->output->set_shape(this->output_shape);
+                this->output->set_shape(input.shape);
                 this->output->expand_dims(this->axis);
                 this->output->free_element();
             }
             else
             {
                 this->output = &input;
-                this->output->set_shape(this->output_shape);
                 this->output->expand_dims(this->axis);
             }
             this->output_shape = this->output->shape;
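Two separate fixes land in this hunk, and the first one recurs across the esp-face headers below. The allocation guard was inverted: with `!= NULL`, the output tensor was never allocated on first use (so the following set_exponent() dereferenced a null pointer), and a fresh tensor would leak on any reuse. The non-inplace branch also now seeds the output with input.shape before expand_dims(), instead of the stale output_shape. A minimal sketch of the lazy-allocation idiom the fix restores (names are illustrative, not from the commit):

// Hypothetical stand-in for the Tensor member; not the esp-face API.
struct Buffer
{
    int value = 0;
};

Buffer *cached = nullptr;

Buffer *get_buffer()
{
    // Allocate only when nothing exists yet. The pre-fix `!= NULL` guard
    // did the opposite: null dereference on first call, leak on reuse.
    if (cached == nullptr)
    {
        cached = new Buffer;
    }
    return cached;
}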

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_flatten.hpp (+1 −1)

@@ -59,7 +59,7 @@ namespace dl
             this->output_shape = {input.get_size()};
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_leakyrelu.hpp (+11 −11)

@@ -10,14 +10,14 @@ namespace dl
     namespace layer
     {
         /**
-         * @brief LeakyReLU(input).
+         * @brief LeakyRelu(input).
          *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
-        class LeakyReLU : public Layer
+        class LeakyRelu : public Layer
        {
        private:
            feature_t activation_alpha; /*<! quantized alpha >*/
@@ -28,26 +28,26 @@ namespace dl
            std::vector<int> output_shape; /*<! output shape of leakyrelu >*/
        public:
            /**
-             * @brief Construct a new LeakyReLU object
+             * @brief Construct a new LeakyRelu object
             *
             * @param activation_alpha quantized alpha
             * @param activation_exponent exponent of quantized alpha
             * @param name name of leakyrelu
             * @param inplace true: the output will store to input0
             *                false: the output will store to a separate memory
             */
-            LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = "LeakyReLU", bool inplace = false) : Layer(name), output(NULL), output_shape({})
+            LeakyRelu(const int activation_alpha, const int activation_exponent, const char *name = "LeakyRelu", bool inplace = false) : Layer(name), output(NULL), output_shape({})
            {
                this->activation_alpha = activation_alpha;
                this->activation_exponent = activation_exponent;
                this->inplace = inplace;
            }
 
            /**
-             * @brief Destroy the LeakyReLU object
+             * @brief Destroy the LeakyRelu object
             *
             */
-            ~LeakyReLU()
+            ~LeakyRelu()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
@@ -66,7 +66,7 @@ namespace dl
            this->output_shape = input.shape;
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }
@@ -90,19 +90,19 @@ namespace dl
            /**
             * @brief Get the output
             *
-             * @return Tensor<feature_t>& LeakyReLU result
+             * @return Tensor<feature_t>& LeakyRelu result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }
 
            /**
-             * @brief Call LeakyReLU operation.
+             * @brief Call LeakyRelu operation.
             *
             * @param input as an input
             * @param assign_core not effective yet
-             * @return LeakyReLU result
+             * @return LeakyRelu result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
            {
@@ -130,7 +130,7 @@ namespace dl
            {
                this->output->set_shape(this->output_shape);
            }
-            nn::leakyrelu<true>(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
+            nn::leakyrelu(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
        }
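Note that the rename LeakyReLU → LeakyRelu (and PReLU → PRelu below) is source-incompatible for code that instantiates these layers directly. A hedged usage sketch against the constructor and call() signatures visible in this diff; the quantization values are invented, and whether a separate build() step is required first is not visible here:

#include "dl_layer_leakyrelu.hpp"

void run_leakyrelu(dl::Tensor<int16_t> &input)
{
    // Constructor arguments follow the signature in the hunk above.
    dl::layer::LeakyRelu<int16_t> lrelu(/*activation_alpha=*/13,
                                        /*activation_exponent=*/-7,
                                        /*name=*/"lrelu",
                                        /*inplace=*/false);
    dl::Tensor<int16_t> &output = lrelu.call(input);
    (void)output;
}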

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_max2d.hpp (+2 −2)

@@ -68,7 +68,7 @@ namespace dl
 
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }
@@ -132,7 +132,7 @@ namespace dl
            {
                this->output->set_shape(this->output_shape);
            }
-            nn::max2d<true>(*this->output, input0, input1, assign_core);
+            nn::max2d(*this->output, input0, input1, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
        }

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_min2d.hpp (+2 −2)

@@ -68,7 +68,7 @@ namespace dl
 
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }
@@ -132,7 +132,7 @@ namespace dl
            {
                this->output->set_shape(this->output_shape);
            }
-            nn::min2d<true>(*this->output, input0, input1, assign_core);
+            nn::min2d(*this->output, input0, input1, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
        }

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_mul2d.hpp (+2 −2)

@@ -75,7 +75,7 @@ namespace dl
 
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }
@@ -140,7 +140,7 @@ namespace dl
            {
                this->output->set_shape(this->output_shape);
            }
-            nn::mul2d<true>(*this->output, input0, input1, this->activation, assign_core);
+            nn::mul2d(*this->output, input0, input1, this->activation, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
        }
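max2d, min2d, mul2d (and leakyrelu above) all drop an explicit `<true>` template argument at their nn:: call sites, which suggests the kernels no longer take a boolean template flag (presumably an inplace switch). A generic, self-contained illustration of why such call sites have to change (hypothetical kernel, not the esp-face API):

#include <iostream>

namespace old_api
{
    // Old style: behavior selected by a bool template parameter.
    template <bool INPLACE>
    void max2d(int &out, int a, int b) { out = (a > b) ? a : b; }
}

namespace new_api
{
    // New style: plain function, so max2d<true>(...) no longer compiles.
    void max2d(int &out, int a, int b) { out = (a > b) ? a : b; }
}

int main()
{
    int out = 0;
    old_api::max2d<true>(out, 3, 5); // pre-change call site
    new_api::max2d(out, 3, 5);       // post-change call site
    std::cout << out << "\n";        // prints 5
}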

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_prelu.hpp (+15 −15)

@@ -10,36 +10,36 @@ namespace dl
    namespace layer
    {
        /**
-         * @brief PReLU(input).
+         * @brief PRelu(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
-        class PReLU : public Layer
+        class PRelu : public Layer
        {
        private:
-            feature_t *activation_element;       /*<! quantized alpha elements along channel axis >*/
+            const feature_t *activation_element; /*<! quantized alpha elements along channel axis >*/
            int activation_exponent;             /*<! exponent of quantized alpha elements >*/
            Tensor<feature_t> *output;           /*<! output ptr of prelu >*/
            bool inplace;                        /*<! true: the output will store to input0
                                                      false: the output will store to a separate memory >*/
            std::vector<int> output_shape;       /*<! output shape of prelu >*/
        public:
            /**
-             * @brief Construct a new PReLU object
+             * @brief Construct a new PRelu object
             *
             * @param activation_element quantized alpha elements along channel axis
             * @param activation_exponent exponent of quantized alpha elements
             * @param name name of prelu
             * @param inplace true: the output will store to input0
             *                false: the output will store to a separate memory
             */
-            PReLU(const feature_t *activation_element,
+            PRelu(const feature_t *activation_element,
                  const int activation_exponent = 0,
-                  const char *name = NULL,
-                  bool inplace = "PReLU") : Layer(name),
+                  const char *name = "PRelu",
+                  bool inplace = false) : Layer(name),
                                            activation_element(activation_element),
                                            activation_exponent(activation_exponent),
                                            output(NULL),
@@ -49,10 +49,10 @@ namespace dl
            }
 
            /**
-             * @brief Destroy the PReLU object
+             * @brief Destroy the PRelu object
             *
             */
-            ~PReLU()
+            ~PRelu()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
@@ -71,7 +71,7 @@ namespace dl
            this->output_shape = input.shape;
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }
@@ -94,19 +94,19 @@ namespace dl
            /**
             * @brief Get the output
             *
-             * @return Tensor<feature_t>& PReLU result
+             * @return Tensor<feature_t>& PRelu result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }
 
            /**
-             * @brief Call PReLU operation.
+             * @brief Call PRelu operation.
             *
             * @param input as an input
             * @param assign_core not effective yet
-             * @return PReLU result
+             * @return PRelu result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
            {
@@ -125,7 +125,7 @@ namespace dl
 
            DL_LOG_LAYER_LATENCY_START();
            nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
-            DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
+            DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
        }
        else
        {
@@ -135,7 +135,7 @@ namespace dl
                this->output->set_shape(this->output_shape);
            }
            nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
-            DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
+            DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
        }
 
        return *this->output;
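Besides the rename, the activation_element member gains const-qualification, the copy-pasted "leakyrelu" latency tag becomes "prelu", and, most importantly, the constructor's transposed defaults are corrected. The old `const char *name = NULL, bool inplace = "PReLU"` compiled because a string literal decays to a pointer and any non-null pointer converts to bool true, so every default-constructed layer was silently inplace and nameless. A compilable sketch of the trap (hypothetical function, not the esp-face API):

#include <cstddef>
#include <iostream>

void make_layer(const char *name = NULL, bool inplace = "PReLU")
{
    // The string literal converts to bool via pointer-to-bool: always true.
    std::cout << "name=" << (name ? name : "(null)")
              << " inplace=" << std::boolalpha << inplace << '\n';
}

int main()
{
    make_layer(); // prints: name=(null) inplace=true
}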

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_relu.hpp (+1 −1)

@@ -61,7 +61,7 @@ namespace dl
            this->output_shape = input.shape;
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_reshape.hpp (+5 −3)

@@ -64,19 +64,21 @@ namespace dl
            this->output_exponent = input.exponent;
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }
                this->output->set_exponent(this->output_exponent);
-                this->output->set_shape(this->output_shape);
+                this->output->set_shape(input.shape);
+                this->output->reshape(this->output_shape);
                this->output->free_element();
            }
            else
            {
                this->output = &input;
-                this->output->set_shape(this->output_shape);
+                this->output->reshape(this->output_shape);
            }
+            this->output_shape = this->output->shape;
 
            if (print_shape)
            {
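The reshape layer now follows the same pattern as expand_dims and squeeze: seed the output with the input's shape, transform it via reshape(), then record the resulting shape. Under the usual tensor-library contract (the esp-face Tensor implementation is not shown in this commit), reshape() must preserve the element count, a check that a blind set_shape() never performed; a toy illustration:

#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

// Toy stand-in for Tensor shape bookkeeping; not the esp-face API.
struct ToyTensor
{
    std::vector<int> shape;

    static int size_of(const std::vector<int> &s)
    {
        return std::accumulate(s.begin(), s.end(), 1, std::multiplies<int>());
    }

    // Blind overwrite: no consistency check.
    void set_shape(const std::vector<int> &s) { shape = s; }

    // Reinterpret the same elements: total size must be unchanged.
    void reshape(const std::vector<int> &s)
    {
        assert(size_of(shape) == size_of(s) && "reshape must preserve size");
        shape = s;
    }
};

int main()
{
    ToyTensor t{{2, 3, 4}};
    t.reshape({6, 4});   // ok: 24 elements either way
    // t.reshape({5, 5}); // would assert: element count changed
}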

tools/sdk/esp32/include/esp-face/include/layer/dl_layer_squeeze.hpp (+1 −2)

@@ -66,7 +66,7 @@ namespace dl
            this->output_exponent = input.exponent;
            if (!this->inplace)
            {
-                if (this->output != NULL)
+                if (this->output == NULL)
                {
                    this->output = new Tensor<feature_t>;
                }
@@ -78,7 +78,6 @@ namespace dl
            else
            {
                this->output = &input;
-                this->output->set_shape(input.shape);
                this->output->squeeze(this->axis);
            }
            this->output_shape = this->output->shape;
