Skip to content

Commit ac3d6cd

Browse files
committed
fix: bugs
1 parent 7e4445b commit ac3d6cd

File tree

1 file changed

+1
-3
lines changed

1 file changed

+1
-3
lines changed

src/bindings/Llama.ts

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -670,7 +670,7 @@ function logMessageIsOnlyDots(message: string | null) {
670670
}
671671

672672
function getTransformedLogLevel(level: LlamaLogLevel, message: string, gpu: BuildGpu): LlamaLogLevel {
673-
if (level === LlamaLogLevel.warn && message.endsWith("the full capacity of the model will not be utilized"))
673+
if (level === LlamaLogLevel.warn && message.trimEnd().endsWith("the full capacity of the model will not be utilized"))
674674
return LlamaLogLevel.info;
675675
else if (level === LlamaLogLevel.warn && message.startsWith("ggml_metal_init: skipping kernel_") && message.endsWith("(not supported)"))
676676
return LlamaLogLevel.log;
@@ -690,8 +690,6 @@ function getTransformedLogLevel(level: LlamaLogLevel, message: string, gpu: Buil
690690
return LlamaLogLevel.info;
691691
else if (level === LlamaLogLevel.warn && message.startsWith("llama_init_from_model: model default pooling_type is [0], but [-1] was specified"))
692692
return LlamaLogLevel.info;
693-
else if (level === LlamaLogLevel.warn && message.startsWith("llama_context: n_ctx_seq (") && message.endsWith("- the full capacity of the model will not be utilized"))
694-
return LlamaLogLevel.info;
695693
else if (gpu === false && level === LlamaLogLevel.warn && message.startsWith("llama_adapter_lora_init_impl: lora for '") && message.endsWith("' cannot use buft 'CPU_REPACK', fallback to CPU"))
696694
return LlamaLogLevel.info;
697695
else if (gpu === "metal" && level === LlamaLogLevel.warn && message.startsWith("ggml_metal_device_init: tensor API disabled for"))

0 commit comments

Comments (0)