summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
author    Cedric Nugteren <web@cedricnugteren.nl> 2017-09-19 19:44:34 +0200
committer Cedric Nugteren <web@cedricnugteren.nl> 2017-09-19 19:44:34 +0200
commit    ae1eeb4d1f13203ee412bc739f477570e6781090 (patch)
tree      39d382f16667d4b1e9a8ce5f7d062f76e4408166 /src
parent    1d2ee29cb9207ce53d52de40b790628bee2ef4da (diff)
Fixed type conversion warnings under MSVC 2013
Diffstat (limited to 'src')
-rw-r--r--  src/routines/levelx/xaxpybatched.cpp |  8
-rw-r--r--  src/routines/levelx/xgemmbatched.cpp | 17
2 files changed, 17 insertions, 8 deletions
diff --git a/src/routines/levelx/xaxpybatched.cpp b/src/routines/levelx/xaxpybatched.cpp
index 6a4269be..0b755ccf 100644
--- a/src/routines/levelx/xaxpybatched.cpp
+++ b/src/routines/levelx/xaxpybatched.cpp
@@ -53,8 +53,12 @@ void XaxpyBatched<T>::DoAxpyBatched(const size_t n, const std::vector<T> &alphas
}
// Upload the arguments to the device
- std::vector<int> x_offsets_int(x_offsets.begin(), x_offsets.end());
- std::vector<int> y_offsets_int(y_offsets.begin(), y_offsets.end());
+ auto x_offsets_int = std::vector<int>(batch_count);
+ auto y_offsets_int = std::vector<int>(batch_count);
+ for (auto batch = size_t{ 0 }; batch < batch_count; ++batch) {
+ x_offsets_int[batch] = static_cast<int>(x_offsets[batch]);
+ y_offsets_int[batch] = static_cast<int>(y_offsets[batch]);
+ }
auto x_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto y_offsets_device = Buffer<int>(context_, BufferAccess::kReadOnly, batch_count);
auto alphas_device = Buffer<T>(context_, BufferAccess::kReadOnly, batch_count);
diff --git a/src/routines/levelx/xgemmbatched.cpp b/src/routines/levelx/xgemmbatched.cpp
index ee8448d2..4e9f0004 100644
--- a/src/routines/levelx/xgemmbatched.cpp
+++ b/src/routines/levelx/xgemmbatched.cpp
@@ -106,9 +106,14 @@ void XgemmBatched<T>::DoGemmBatched(const Layout layout, const Transpose a_trans
betas_device.Write(queue_, batch_count, betas);
// Converts the offset to integers
- std::vector<int> a_offsets_int(a_offsets.begin(), a_offsets.end());
- std::vector<int> b_offsets_int(b_offsets.begin(), b_offsets.end());
- std::vector<int> c_offsets_int(c_offsets.begin(), c_offsets.end());
+ auto a_offsets_int = std::vector<int>(batch_count);
+ auto b_offsets_int = std::vector<int>(batch_count);
+ auto c_offsets_int = std::vector<int>(batch_count);
+ for (auto batch = size_t{ 0 }; batch < batch_count; ++batch) {
+ a_offsets_int[batch] = static_cast<int>(a_offsets[batch]);
+ b_offsets_int[batch] = static_cast<int>(b_offsets[batch]);
+ c_offsets_int[batch] = static_cast<int>(c_offsets[batch]);
+ }
// Selects which version of the batched GEMM to run
const auto do_gemm_direct = true;
@@ -169,9 +174,9 @@ void XgemmBatched<T>::BatchedGemmIndirect(const size_t m, const size_t n, const
auto b_offsets_i = std::vector<int>(batch_count);
auto c_offsets_i = std::vector<int>(batch_count);
for (auto batch = size_t{0}; batch < batch_count; ++batch) {
- a_offsets_i[batch] = batch * a_one_i * a_two_i;
- b_offsets_i[batch] = batch * b_one_i * b_two_i;
- c_offsets_i[batch] = batch * c_one_i * c_two_i;
+ a_offsets_i[batch] = static_cast<int>(batch * a_one_i * a_two_i);
+ b_offsets_i[batch] = static_cast<int>(batch * b_one_i * b_two_i);
+ c_offsets_i[batch] = static_cast<int>(batch * c_one_i * c_two_i);
}
// Determines whether or not temporary matrices are needed