Commit 84fea22

Merge pull request #179 from MollySophia/fix-linux-cuda
Fix build with CUDA under linux
2 parents: d622368 + 15ce960

3 files changed: 4 additions, 5 deletions

CMakeLists.txt

Lines changed: 0 additions & 1 deletion
@@ -172,7 +172,6 @@ elseif()
     set(GGML_STANDALONE ON)
 endif()
 
-set(BUILD_SHARED_LIBS OFF)
 if (NOT TARGET ggml)
     add_subdirectory(ggml)
     # ... otherwise assume ggml is added by a parent CMakeLists.txt

rwkv_graph.inc

Lines changed: 3 additions & 3 deletions
@@ -548,7 +548,7 @@ static struct ggml_tensor * rwkv_att_v6(
         ggml_reshape_1d(ctx, layer.att_time_decay, n_embed)
     );
 
-    w = rwkv_exp(ctx, ggml_neg_inplace(ctx, rwkv_exp(ctx, w)));
+    w = rwkv_exp(ctx, ggml_neg(ctx, rwkv_exp(ctx, w)));
     w = ggml_reshape_4d(ctx, w, 1, head_size, head_count, sequence_length);
 
     // dup is not strictly required; doing it just in case.
@@ -576,9 +576,9 @@ static struct ggml_tensor * rwkv_att_v6(
     x = rwkv_group_norm_eps_64e_minus5(ctx, x, head_count);
     // Convert back to a regular vector.
     x = ggml_reshape_2d(ctx, x, n_embed, sequence_length);
-    x = ggml_add_inplace(
+    x = ggml_add(
         ctx,
-        ggml_mul_inplace(
+        ggml_mul(
             ctx,
             x,
             layer.att_ln_x_weight
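For reference, the tensor expression rebuilt in the first hunk, rwkv_exp(ctx, ggml_neg(ctx, rwkv_exp(ctx, w))), applies exp(-exp(w)) element-wise to what appears to be the v6 time-decay tensor (w is derived from layer.att_time_decay just above). A minimal scalar sketch, not part of the diff and with a made-up function name:

#include <math.h>

/* Scalar equivalent of rwkv_exp(ctx, ggml_neg(ctx, rwkv_exp(ctx, w))):
 * for any finite w, exp(w) > 0, so exp(-exp(w)) always lands in (0, 1). */
static float rwkv_v6_decay_scalar(float w) {
    return expf(-expf(w));
}

The change itself only swaps ggml_neg_inplace for ggml_neg (and likewise add/mul below); the math is unchanged.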

rwkv_operators.inc

Lines changed: 1 addition & 1 deletion
@@ -158,7 +158,7 @@ static void rwkv_groupnorm_impl(
 
 // Element-wise exp(x)
 struct ggml_tensor * rwkv_exp(struct ggml_context * ctx, struct ggml_tensor * x) {
-    return ggml_map_custom1_inplace(ctx, x, rwkv_exp_impl, 1, NULL);
+    return ggml_map_custom1(ctx, x, rwkv_exp_impl, 1, NULL);
 }
 
 // Element-wise 1 - x
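The switch from ggml_map_custom1_inplace to ggml_map_custom1 keeps the same callback; rwkv_exp_impl itself is not part of this diff. As a rough sketch of what such a callback looks like, assuming ggml's ggml_custom1_op_t signature (dst, src, thread index, thread count, userdata) and float32 tensors; the name exp_impl_sketch is hypothetical:

#include <math.h>
#include "ggml.h"

// Hypothetical stand-in for rwkv_exp_impl: fill dst with expf(src) element-wise,
// splitting the elements evenly across the nth compute threads.
static void exp_impl_sketch(struct ggml_tensor * dst, const struct ggml_tensor * src,
                            int ith, int nth, void * userdata) {
    (void) userdata;
    const int64_t n = ggml_nelements(dst);
    const int64_t per_thread = (n + nth - 1) / nth;
    const int64_t start = ith * per_thread;
    const int64_t end = start + per_thread < n ? start + per_thread : n;
    float * out = (float *) dst->data;
    const float * in = (const float *) src->data;
    for (int64_t i = start; i < end; i++) {
        out[i] = expf(in[i]);
    }
}

Building the node with ggml_map_custom1(ctx, x, exp_impl_sketch, 1, NULL) writes the result into a fresh tensor, whereas the _inplace variant reuses x's buffer.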
