Commit 5912630

Update clang-format version to 15 in GitHub actions
1 parent 0ba4365

12 files changed: +26 −26 lines

Apart from the clang-format version bump in the workflow file, the hunks below are formatting-only changes produced by clang-format 15: whitespace realignment of wrapped continuation lines and a space inserted after "//" in commented-out code.

‎.github/workflows/unix_cpu_build.yml

+2 −2 lines changed

@@ -17,11 +17,11 @@ jobs:
         uses: actions/checkout@master

       - name: Check Sources
-        uses: DoozyX/clang-format-lint-action@v0.14
+        uses: DoozyX/clang-format-lint-action@v0.15
         with:
           source: './src ./test ./examples'
           extensions: 'h,cpp,hpp'
-          clangFormatVersion: 14
+          clangFormatVersion: 15

   documentation:
     name: Documentation
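
As a side note (not part of this commit), the same check can be reproduced without the DoozyX action by invoking clang-format 15 directly. The step below is a rough sketch under the assumption that a clang-format-15 binary is available and that the repository's .clang-format file drives the style; the paths and extensions mirror the settings above.

    - name: Check Sources (direct clang-format, hypothetical)
      run: |
        # Lint the same trees the action checks; --dry-run --Werror makes
        # clang-format fail on any formatting difference instead of rewriting files.
        find src test examples -name '*.h' -o -name '*.hpp' -o -name '*.cpp' \
          | xargs clang-format-15 --style=file --dry-run --Werror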

‎src/api/unified/symbol_manager.hpp

+1 −1 lines changed

@@ -156,7 +156,7 @@ bool checkArrays(af_backend activeBackend, T a, Args... arg) {
         if (index_ != arrayfire::unified::getActiveBackend()) { \
             index_ = arrayfire::unified::getActiveBackend(); \
             func = (af_func)arrayfire::common::getFunctionPointer( \
-                arrayfire::unified::getActiveHandle(), __func__); \
+                arrayfire::unified::getActiveHandle(), __func__); \
         } \
         return func(__VA_ARGS__); \
     } else { \

‎src/backend/common/graphics_common.cpp

+1 −1 lines changed

@@ -260,7 +260,7 @@ fg_window ForgeManager::getMainWindow() {
     }
     fg_window w = nullptr;
     forgeError = this->mPlugin->fg_create_window(
-        &w, WIDTH, HEIGHT, "ArrayFire", NULL, true);
+        &w, WIDTH, HEIGHT, "ArrayFire", NULL, true);
     if (forgeError != FG_ERR_NONE) { return; }
     this->setWindowChartGrid(w, 1, 1);
     this->mPlugin->fg_make_window_current(w);

‎src/backend/cpu/convolve.cpp

+2 −2 lines changed

@@ -193,7 +193,7 @@ Array<T> conv2DataGradient(const Array<T> &incoming_gradient,
     Array<T> collapsed_gradient = incoming_gradient;
     collapsed_gradient = reorder(collapsed_gradient, dim4(0, 1, 3, 2));
     collapsed_gradient = modDims(
-        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));
+        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));

     Array<T> res =
         matmul(collapsed_gradient, collapsed_filter, AF_MAT_NONE, AF_MAT_TRANS);
@@ -232,7 +232,7 @@ Array<T> conv2FilterGradient(const Array<T> &incoming_gradient,
     Array<T> collapsed_gradient = incoming_gradient;
     collapsed_gradient = reorder(collapsed_gradient, dim4(0, 1, 3, 2));
     collapsed_gradient = modDims(
-        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));
+        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));

     Array<T> res =
         matmul(unwrapped, collapsed_gradient, AF_MAT_NONE, AF_MAT_NONE);

‎src/backend/cuda/convolveNN.cpp

+2 −2 lines changed

@@ -260,7 +260,7 @@ Array<T> data_gradient_base(const Array<T> &incoming_gradient,
     Array<T> collapsed_gradient = incoming_gradient;
     collapsed_gradient = reorder(collapsed_gradient, dim4(0, 1, 3, 2));
     collapsed_gradient = modDims(
-        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));
+        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));

     T alpha = scalar<T>(1.0);
     T beta = scalar<T>(0.0);
@@ -390,7 +390,7 @@ Array<T> filter_gradient_base(const Array<T> &incoming_gradient,
     Array<T> collapsed_gradient = incoming_gradient;
     collapsed_gradient = reorder(collapsed_gradient, dim4(0, 1, 3, 2));
     collapsed_gradient = modDims(
-        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));
+        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));

     T alpha = scalar<T>(1.0);
     T beta = scalar<T>(0.0);

‎src/backend/cuda/kernel/random_engine.hpp

+2 −2 lines changed

@@ -60,9 +60,9 @@ static const int THREADS = 256;
 #define HALF_HALF_FACTOR __ushort_as_half(0x80)

 // Conversion to half adapted from Random123
-//#define SIGNED_HALF_FACTOR \
+// #define SIGNED_HALF_FACTOR \
 //((1.0f) / (std::numeric_limits<short>::max() + (1.0f)))
-//#define SIGNED_HALF_HALF_FACTOR ((0.5f) * SIGNED_HALF_FACTOR)
+// #define SIGNED_HALF_HALF_FACTOR ((0.5f) * SIGNED_HALF_FACTOR)
 //
 // NOTE: The following constants for half were calculated using the formulas
 // above. This is done so that we can avoid unnecessary computations because the
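
The commented-out formulas in this hunk document how the precomputed half constants were derived. Purely as an illustration (not part of the commit; the variable names below are made up), the arithmetic can be checked with a few lines of standalone C++:

    #include <cstdio>
    #include <limits>

    int main() {
        // ((1.0f) / (std::numeric_limits<short>::max() + (1.0f))) == 1/32768
        float signed_half_factor = 1.0f / (std::numeric_limits<short>::max() + 1.0f);
        // ((0.5f) * SIGNED_HALF_FACTOR)
        float signed_half_half_factor = 0.5f * signed_half_factor;
        std::printf("%g %g\n", signed_half_factor, signed_half_half_factor);
        // prints 3.05176e-05 1.52588e-05
    }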

‎src/backend/oneapi/compile_module.cpp

+2 −2 lines changed

@@ -72,7 +72,7 @@ namespace arrayfire {
 namespace oneapi {

 // const static string DEFAULT_MACROS_STR(
-// "\n\
+// "\n\
 //#ifdef USE_DOUBLE\n\
 //#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n\
 //#endif\n \
@@ -84,7 +84,7 @@ namespace oneapi {
 //#ifndef M_PI\n \
 //#define
 // M_PI 3.1415926535897932384626433832795028841971693993751058209749445923078164\n
-// \
+// \
 //#endif\n \
 //");

‎src/backend/oneapi/exampleFunction.cpp

+5 −5 lines changed

@@ -16,11 +16,11 @@
 #include <err_oneapi.hpp>  // error check functions and Macros
                            // specific to oneapi backend

-//#include <kernel/exampleFunction.hpp> // this header under the folder
-// src/oneapi/kernel
-// defines the OneAPI kernel wrapper
-// function to which the main computation of your
-// algorithm should be relayed to
+// #include <kernel/exampleFunction.hpp> // this header under the folder
+// src/oneapi/kernel
+// defines the OneAPI kernel wrapper
+// function to which the main computation of your
+// algorithm should be relayed to

 using af::dim4;

‎src/backend/opencl/convolve.cpp

+2 −2 lines changed

@@ -185,7 +185,7 @@ Array<T> conv2DataGradient(const Array<T> &incoming_gradient,
     Array<T> collapsed_gradient = incoming_gradient;
     collapsed_gradient = reorder(collapsed_gradient, dim4(0, 1, 3, 2));
     collapsed_gradient = modDims(
-        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));
+        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));

     Array<T> res =
         matmul(collapsed_gradient, collapsed_filter, AF_MAT_NONE, AF_MAT_TRANS);
@@ -224,7 +224,7 @@ Array<T> conv2FilterGradient(const Array<T> &incoming_gradient,
     Array<T> collapsed_gradient = incoming_gradient;
     collapsed_gradient = reorder(collapsed_gradient, dim4(0, 1, 3, 2));
     collapsed_gradient = modDims(
-        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));
+        collapsed_gradient, dim4(cDims[0] * cDims[1] * cDims[3], cDims[2]));

     Array<T> res =
         matmul(unwrapped, collapsed_gradient, AF_MAT_NONE, AF_MAT_NONE);

‎src/backend/opencl/memory.cpp

+2 −2 lines changed

@@ -204,8 +204,8 @@ size_t Allocator::getMaxMemorySize(int id) {
 void *Allocator::nativeAlloc(const size_t bytes) {
     cl_int err = CL_SUCCESS;
     auto ptr = static_cast<void *>(clCreateBuffer(
-        getContext()(), CL_MEM_READ_WRITE,  // NOLINT(hicpp-signed-bitwise)
-        bytes, nullptr, &err));
+        getContext()(), CL_MEM_READ_WRITE,  // NOLINT(hicpp-signed-bitwise)
+        bytes, nullptr, &err));

     if (err != CL_SUCCESS) {
         auto str = fmt::format("Failed to allocate device memory of size {}",

‎src/backend/opencl/svd.cpp

+2 −2 lines changed

@@ -137,8 +137,8 @@ void svd(Array<T> &arrU, Array<Tr> &arrS, Array<T> &arrVT, Array<T> &arrA,

     if (want_vectors) {
         mappedU = static_cast<T *>(getQueue().enqueueMapBuffer(
-            *arrU.get(), CL_FALSE, CL_MAP_WRITE, sizeof(T) * arrU.getOffset(),
-            sizeof(T) * arrU.elements()));
+            *arrU.get(), CL_FALSE, CL_MAP_WRITE, sizeof(T) * arrU.getOffset(),
+            sizeof(T) * arrU.elements()));
         mappedVT = static_cast<T *>(getQueue().enqueueMapBuffer(
             *arrVT.get(), CL_TRUE, CL_MAP_WRITE, sizeof(T) * arrVT.getOffset(),
             sizeof(T) * arrVT.elements()));

‎src/backend/opencl/topk.cpp

+3 −3 lines changed

@@ -76,13 +76,13 @@ void topk(Array<T>& vals, Array<unsigned>& idxs, const Array<T>& in,
     cl::Event ev_in, ev_val, ev_ind;

     T* ptr = static_cast<T*>(getQueue().enqueueMapBuffer(
-        *in_buf, CL_FALSE, CL_MAP_READ, 0, in.elements() * sizeof(T),
-        nullptr, &ev_in));
+        *in_buf, CL_FALSE, CL_MAP_READ, 0, in.elements() * sizeof(T),
+        nullptr, &ev_in));
     uint* iptr = static_cast<uint*>(getQueue().enqueueMapBuffer(
         *ibuf, CL_FALSE, CL_MAP_READ | CL_MAP_WRITE, 0, k * sizeof(uint),
         nullptr, &ev_ind));
     T* vptr = static_cast<T*>(getQueue().enqueueMapBuffer(
-        *vbuf, CL_FALSE, CL_MAP_WRITE, 0, k * sizeof(T), nullptr, &ev_val));
+        *vbuf, CL_FALSE, CL_MAP_WRITE, 0, k * sizeof(T), nullptr, &ev_val));

     vector<uint> idx(in.elements());

0 commit comments