id
int64
0
886
original_context
stringlengths
648
56.6k
modified_context
stringlengths
587
47.6k
omitted_context
sequencelengths
0
19
omitted_index
sequencelengths
0
19
metadata
dict
800
diff --git a/common/arg.cpp b/common/arg.cpp index 85ba411146786..9cbf985710112 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -2140,6 +2140,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.no_mmproj = true; } ).set_examples(mmproj_examples)); + add_opt(common_arg( + {"--no-mmproj-offload"}, + "do not offload multimodal projector to GPU", + [](common_params & params) { + params.mmproj_use_gpu = false; + } + ).set_examples(mmproj_examples)); add_opt(common_arg( {"--image"}, "FILE", "path to an image file. use with multimodal models. Specify multiple times for batching", diff --git a/common/common.h b/common/common.h index 70d3ef8f27870..0a9dc0599f722 100644 --- a/common/common.h +++ b/common/common.h @@ -342,6 +342,7 @@ struct common_params { // multimodal models (see examples/llava) struct common_params_model mmproj; + bool mmproj_use_gpu = true; // use GPU for multimodal model bool no_mmproj = false; // explicitly disable multimodal model std::vector<std::string> image; // path to image file(s) diff --git a/examples/llava/mtmd-cli.cpp b/examples/llava/mtmd-cli.cpp index 19373760576a9..250e8c9a9e871 100644 --- a/examples/llava/mtmd-cli.cpp +++ b/examples/llava/mtmd-cli.cpp @@ -40,7 +40,8 @@ static void show_additional_info(int /*argc*/, char ** argv) { "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> -p <prompt>\n\n" " -m and --mmproj are required\n" " -hf user/repo can replace both -m and --mmproj in most cases\n" - " --image and -p are optional, if NOT provided, the CLI will run in chat mode\n", + " --image and -p are optional, if NOT provided, the CLI will run in chat mode\n" + " to disable using GPU for mmproj model, add --no-mmproj-offload\n", argv[0] ); } @@ -112,10 +113,10 @@ struct mtmd_cli_context { void init_vision_context(common_params & params) { const char * clip_path = params.mmproj.path.c_str(); ctx_vision.reset(mtmd_init_from_file(clip_path, model, mtmd_context_params{ - /* use_gpu 
*/ true, + /* use_gpu */ params.mmproj_use_gpu, /* timings */ true, /* n_threads */ params.cpuparams.n_threads, - /* verbosity */ GGML_LOG_LEVEL_INFO, + /* verbosity */ params.verbosity > 0 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_INFO, })); if (!ctx_vision.get()) { LOG_ERR("Failed to load vision model from %s\n", clip_path);
diff --git a/common/arg.cpp b/common/arg.cpp index 85ba411146786..9cbf985710112 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -2140,6 +2140,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.no_mmproj = true; } ).set_examples(mmproj_examples)); + add_opt(common_arg( + {"--no-mmproj-offload"}, + "do not offload multimodal projector to GPU", + [](common_params & params) { + params.mmproj_use_gpu = false; + } + ).set_examples(mmproj_examples)); add_opt(common_arg( {"--image"}, "FILE", "path to an image file. use with multimodal models. Specify multiple times for batching", diff --git a/common/common.h b/common/common.h index 70d3ef8f27870..0a9dc0599f722 100644 --- a/common/common.h +++ b/common/common.h @@ -342,6 +342,7 @@ struct common_params { // multimodal models (see examples/llava) struct common_params_model mmproj; + bool mmproj_use_gpu = true; // use GPU for multimodal model bool no_mmproj = false; // explicitly disable multimodal model std::vector<std::string> image; // path to image file(s) diff --git a/examples/llava/mtmd-cli.cpp b/examples/llava/mtmd-cli.cpp index 19373760576a9..250e8c9a9e871 100644 --- a/examples/llava/mtmd-cli.cpp +++ b/examples/llava/mtmd-cli.cpp @@ -40,7 +40,8 @@ static void show_additional_info(int /*argc*/, char ** argv) { "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> -p <prompt>\n\n" " -m and --mmproj are required\n" " -hf user/repo can replace both -m and --mmproj in most cases\n" + " --image and -p are optional, if NOT provided, the CLI will run in chat mode\n" + " to disable using GPU for mmproj model, add --no-mmproj-offload\n", argv[0] ); } @@ -112,10 +113,10 @@ struct mtmd_cli_context { void init_vision_context(common_params & params) { const char * clip_path = params.mmproj.path.c_str(); ctx_vision.reset(mtmd_init_from_file(clip_path, model, mtmd_context_params{ + /* use_gpu */ params.mmproj_use_gpu, /* timings */ true, /* n_threads */ 
params.cpuparams.n_threads, - /* verbosity */ GGML_LOG_LEVEL_INFO, + /* verbosity */ params.verbosity > 0 ? GGML_LOG_LEVEL_DEBUG : GGML_LOG_LEVEL_INFO, })); if (!ctx_vision.get()) { LOG_ERR("Failed to load vision model from %s\n", clip_path);
[ "- \" --image and -p are optional, if NOT provided, the CLI will run in chat mode\\n\",", "- /* use_gpu */ true," ]
[ 38, 48 ]
{ "additions": 12, "author": "ngxson", "deletions": 3, "html_url": "https://github.com/ggml-org/llama.cpp/pull/13093", "issue_id": 13093, "merged_at": "2025-04-24T12:04:14Z", "omission_probability": 0.1, "pr_number": 13093, "repo": "ggml-org/llama.cpp", "title": "arg : add --no-mmproj-offload", "total_changes": 15 }
801
diff --git a/common/arg.cpp b/common/arg.cpp index 1cfd0168d95ae..85ba411146786 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -38,6 +38,11 @@ using json = nlohmann::ordered_json; +std::initializer_list<enum llama_example> mmproj_examples = { + LLAMA_EXAMPLE_LLAVA, + // TODO: add LLAMA_EXAMPLE_SERVER when it's ready +}; + common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) { this->examples = std::move(examples); return *this; @@ -641,11 +646,16 @@ static struct common_hf_file_res common_get_hf_file(const std::string &, const s // utils // -static void common_params_handle_model( +struct handle_model_result { + bool found_mmproj = false; + common_params_model mmproj; +}; + +static handle_model_result common_params_handle_model( struct common_params_model & model, const std::string & bearer_token, - const std::string & model_path_default, - bool is_mmproj = false) { // TODO: move is_mmproj to an enum when we have more files? + const std::string & model_path_default) { + handle_model_result result; // handle pre-fill default model path and url based on hf_repo and hf_file { if (!model.hf_repo.empty()) { @@ -657,7 +667,12 @@ static void common_params_handle_model( exit(1); // built without CURL, error message already printed } model.hf_repo = auto_detected.repo; - model.hf_file = is_mmproj ? 
auto_detected.mmprojFile : auto_detected.ggufFile; + model.hf_file = auto_detected.ggufFile; + if (!auto_detected.mmprojFile.empty()) { + result.found_mmproj = true; + result.mmproj.hf_repo = model.hf_repo; + result.mmproj.hf_file = auto_detected.mmprojFile; + } } else { model.hf_file = model.path; } @@ -694,6 +709,8 @@ static void common_params_handle_model( exit(1); } } + + return result; } const std::vector<ggml_type> kv_cache_types = { @@ -827,16 +844,25 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n"); } - common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH); - common_params_handle_model(params.speculative.model, params.hf_token, ""); - common_params_handle_model(params.vocoder.model, params.hf_token, ""); - - // allow --mmproj to be set from -hf - // assuming that mmproj is always in the same repo as text model - if (!params.model.hf_repo.empty() && ctx_arg.ex == LLAMA_EXAMPLE_LLAVA) { - params.mmproj.hf_repo = params.model.hf_repo; + // handle model and download + { + auto res = common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH); + if (params.no_mmproj) { + params.mmproj = {}; + } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) { + // optionally, handle mmproj model when -hf is specified + params.mmproj = res.mmproj; + } + // only download mmproj if the current example is using it + for (auto & ex : mmproj_examples) { + if (ctx_arg.ex == ex) { + common_params_handle_model(params.mmproj, params.hf_token, ""); + break; + } + } + common_params_handle_model(params.speculative.model, params.hf_token, ""); + common_params_handle_model(params.vocoder.model, params.hf_token, ""); } - common_params_handle_model(params.mmproj, params.hf_token, "", true); if (params.escape) { string_process_escapes(params.prompt); @@ -2095,18 +2121,25 @@ 
common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING")); add_opt(common_arg( {"--mmproj"}, "FILE", - "path to a multimodal projector file for LLaVA. see examples/llava/README.md", + "path to a multimodal projector file. see examples/llava/README.md", [](common_params & params, const std::string & value) { params.mmproj.path = value; } - ).set_examples({LLAMA_EXAMPLE_LLAVA})); + ).set_examples(mmproj_examples)); add_opt(common_arg( {"--mmproj-url"}, "URL", - "URL to a multimodal projector file for LLaVA. see examples/llava/README.md", + "URL to a multimodal projector file. see examples/llava/README.md", [](common_params & params, const std::string & value) { params.mmproj.url = value; } - ).set_examples({LLAMA_EXAMPLE_LLAVA})); + ).set_examples(mmproj_examples)); + add_opt(common_arg( + {"--no-mmproj"}, + "explicitly disable multimodal projector, useful when using -hf", + [](common_params & params) { + params.no_mmproj = true; + } + ).set_examples(mmproj_examples)); add_opt(common_arg( {"--image"}, "FILE", "path to an image file. use with multimodal models. Specify multiple times for batching", @@ -2381,6 +2414,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex add_opt(common_arg( {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]", "Hugging Face model repository; quant is optional, case-insensitive, default to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.\n" + "mmproj is also downloaded automatically if available. 
to disable, add --no-mmproj\n" "example: unsloth/phi-4-GGUF:q4_k_m\n" "(default: unused)", [](common_params & params, const std::string & value) { diff --git a/common/common.h b/common/common.h index e6eaa8e80cf05..70d3ef8f27870 100644 --- a/common/common.h +++ b/common/common.h @@ -342,6 +342,7 @@ struct common_params { // multimodal models (see examples/llava) struct common_params_model mmproj; + bool no_mmproj = false; // explicitly disable multimodal model std::vector<std::string> image; // path to image file(s) // embedding diff --git a/examples/llava/mtmd-cli.cpp b/examples/llava/mtmd-cli.cpp index e80845a2c5469..b4b226bebb119 100644 --- a/examples/llava/mtmd-cli.cpp +++ b/examples/llava/mtmd-cli.cpp @@ -249,6 +249,7 @@ int main(int argc, char ** argv) { if (params.mmproj.path.empty()) { show_additional_info(argc, argv); + LOG_ERR("ERR: Missing --mmproj argument\n"); return 1; }
diff --git a/common/arg.cpp b/common/arg.cpp index 1cfd0168d95ae..85ba411146786 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -38,6 +38,11 @@ using json = nlohmann::ordered_json; +std::initializer_list<enum llama_example> mmproj_examples = { + // TODO: add LLAMA_EXAMPLE_SERVER when it's ready common_arg & common_arg::set_examples(std::initializer_list<enum llama_example> examples) { this->examples = std::move(examples); return *this; @@ -641,11 +646,16 @@ static struct common_hf_file_res common_get_hf_file(const std::string &, const s // utils // -static void common_params_handle_model( +struct handle_model_result { + bool found_mmproj = false; + common_params_model mmproj; +static handle_model_result common_params_handle_model( struct common_params_model & model, const std::string & bearer_token, - const std::string & model_path_default, - bool is_mmproj = false) { // TODO: move is_mmproj to an enum when we have more files? + const std::string & model_path_default) { + handle_model_result result; // handle pre-fill default model path and url based on hf_repo and hf_file { if (!model.hf_repo.empty()) { @@ -657,7 +667,12 @@ static void common_params_handle_model( exit(1); // built without CURL, error message already printed } model.hf_repo = auto_detected.repo; - model.hf_file = is_mmproj ? 
auto_detected.mmprojFile : auto_detected.ggufFile; + model.hf_file = auto_detected.ggufFile; + if (!auto_detected.mmprojFile.empty()) { + result.mmproj.hf_repo = model.hf_repo; + result.mmproj.hf_file = auto_detected.mmprojFile; + } } else { model.hf_file = model.path; } @@ -694,6 +709,8 @@ static void common_params_handle_model( exit(1); + return result; } const std::vector<ggml_type> kv_cache_types = { @@ -827,16 +844,25 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n"); - common_params_handle_model(params.speculative.model, params.hf_token, ""); - common_params_handle_model(params.vocoder.model, params.hf_token, ""); - - if (!params.model.hf_repo.empty() && ctx_arg.ex == LLAMA_EXAMPLE_LLAVA) { - params.mmproj.hf_repo = params.model.hf_repo; + // handle model and download + { + auto res = common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH); + if (params.no_mmproj) { + params.mmproj = {}; + // optionally, handle mmproj model when -hf is specified + params.mmproj = res.mmproj; + // only download mmproj if the current example is using it + for (auto & ex : mmproj_examples) { + if (ctx_arg.ex == ex) { + common_params_handle_model(params.mmproj, params.hf_token, ""); + break; + } + common_params_handle_model(params.speculative.model, params.hf_token, ""); + common_params_handle_model(params.vocoder.model, params.hf_token, ""); - common_params_handle_model(params.mmproj, params.hf_token, "", true); if (params.escape) { string_process_escapes(params.prompt); @@ -2095,18 +2121,25 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING")); {"--mmproj"}, "FILE", - "path to a multimodal projector file for LLaVA. see examples/llava/README.md", + "path to a multimodal projector file. 
see examples/llava/README.md", params.mmproj.path = value; {"--mmproj-url"}, "URL", - "URL to a multimodal projector file for LLaVA. see examples/llava/README.md", + "URL to a multimodal projector file. see examples/llava/README.md", params.mmproj.url = value; + add_opt(common_arg( + {"--no-mmproj"}, + "explicitly disable multimodal projector, useful when using -hf", + [](common_params & params) { + params.no_mmproj = true; {"--image"}, "FILE", "path to an image file. use with multimodal models. Specify multiple times for batching", @@ -2381,6 +2414,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"-hf", "-hfr", "--hf-repo"}, "<user>/<model>[:quant]", "Hugging Face model repository; quant is optional, case-insensitive, default to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.\n" + "mmproj is also downloaded automatically if available. to disable, add --no-mmproj\n" "example: unsloth/phi-4-GGUF:q4_k_m\n" "(default: unused)", diff --git a/common/common.h b/common/common.h index e6eaa8e80cf05..70d3ef8f27870 100644 --- a/common/common.h +++ b/common/common.h @@ -342,6 +342,7 @@ struct common_params { // multimodal models (see examples/llava) struct common_params_model mmproj; + bool no_mmproj = false; // explicitly disable multimodal model std::vector<std::string> image; // path to image file(s) // embedding diff --git a/examples/llava/mtmd-cli.cpp b/examples/llava/mtmd-cli.cpp index e80845a2c5469..b4b226bebb119 100644 --- a/examples/llava/mtmd-cli.cpp +++ b/examples/llava/mtmd-cli.cpp @@ -249,6 +249,7 @@ int main(int argc, char ** argv) { if (params.mmproj.path.empty()) { show_additional_info(argc, argv); + LOG_ERR("ERR: Missing --mmproj argument\n"); return 1;
[ "+ LLAMA_EXAMPLE_LLAVA,", "+ result.found_mmproj = true;", "- common_params_handle_model(params.model, params.hf_token, DEFAULT_MODEL_PATH);", "- // allow --mmproj to be set from -hf", "- // assuming that mmproj is always in the same repo as text model", "+ } else if (res.found_mmproj && params.mmproj.path.empty() && params.mmproj.url.empty()) {" ]
[ 9, 43, 63, 67, 68, 76 ]
{ "additions": 53, "author": "ngxson", "deletions": 17, "html_url": "https://github.com/ggml-org/llama.cpp/pull/13082", "issue_id": 13082, "merged_at": "2025-04-24T10:14:13Z", "omission_probability": 0.1, "pr_number": 13082, "repo": "ggml-org/llama.cpp", "title": "arg : clean up handling --mmproj with -hf", "total_changes": 70 }
802
diff --git a/examples/llava/mtmd-cli.cpp b/examples/llava/mtmd-cli.cpp index e80845a2c5469..89af7331a1658 100644 --- a/examples/llava/mtmd-cli.cpp +++ b/examples/llava/mtmd-cli.cpp @@ -24,7 +24,9 @@ #include <signal.h> #endif -static bool g_is_generating = false; +// volatile, because of signal being an interrupt +static volatile bool g_is_generating = false; +static volatile bool g_is_interrupted = false; /** * Please note that this is NOT a production-ready stuff. @@ -50,8 +52,10 @@ static void sigint_handler(int signo) { g_is_generating = false; } else { console::cleanup(); - LOG("\nInterrupted by user\n"); - _exit(130); + if (g_is_interrupted) { + _exit(1); + } + g_is_interrupted = true; } } } @@ -167,7 +171,7 @@ struct decode_embd_batch { static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int n_predict) { llama_tokens generated_tokens; for (int i = 0; i < n_predict; i++) { - if (i > n_predict || !g_is_generating) { + if (i > n_predict || !g_is_generating || g_is_interrupted) { printf("\n"); break; } @@ -184,6 +188,11 @@ static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str()); fflush(stdout); + if (g_is_interrupted) { + printf("\n"); + break; + } + // eval the token common_batch_clear(ctx.batch); common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true); @@ -219,6 +228,9 @@ static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, std::vect text.add_special = add_bos; text.parse_special = true; mtmd_input_chunks chunks; + + if (g_is_interrupted) return 0; + int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps); if (res != 0) { LOG_ERR("Unable to tokenize prompt, res = %d\n", res); @@ -276,6 +288,8 @@ int main(int argc, char ** argv) { #endif } + if (g_is_interrupted) return 130; + if (is_single_turn) { g_is_generating = true; if (params.prompt.find("<__image__>") == std::string::npos) { @@ -287,7 +301,7 @@ int 
main(int argc, char ** argv) { if (eval_message(ctx, msg, params.image, true)) { return 1; } - if (generate_response(ctx, smpl, n_predict)) { + if (!g_is_interrupted && generate_response(ctx, smpl, n_predict)) { return 1; } @@ -302,12 +316,13 @@ int main(int argc, char ** argv) { std::vector<std::string> images_fname; std::string content; - while (true) { + while (!g_is_interrupted) { g_is_generating = false; LOG("\n> "); console::set_display(console::user_input); std::string line; console::readline(line, false); + if (g_is_interrupted) break; console::set_display(console::reset); line = string_strip(line); if (line.empty()) { @@ -335,6 +350,7 @@ int main(int argc, char ** argv) { msg.role = "user"; msg.content = content; int ret = eval_message(ctx, msg, images_fname, is_first_msg); + if (g_is_interrupted) break; if (ret == 2) { // non-fatal error images_fname.clear(); @@ -352,6 +368,7 @@ int main(int argc, char ** argv) { is_first_msg = false; } } + if (g_is_interrupted) LOG("\nInterrupted by user\n"); llama_perf_context_print(ctx.lctx); - return 0; + return g_is_interrupted ? 130 : 0; }
diff --git a/examples/llava/mtmd-cli.cpp b/examples/llava/mtmd-cli.cpp index e80845a2c5469..89af7331a1658 100644 --- a/examples/llava/mtmd-cli.cpp +++ b/examples/llava/mtmd-cli.cpp @@ -24,7 +24,9 @@ #include <signal.h> +// volatile, because of signal being an interrupt +static volatile bool g_is_generating = false; +static volatile bool g_is_interrupted = false; /** * Please note that this is NOT a production-ready stuff. @@ -50,8 +52,10 @@ static void sigint_handler(int signo) { } else { console::cleanup(); - LOG("\nInterrupted by user\n"); - _exit(130); + if (g_is_interrupted) { + } + g_is_interrupted = true; @@ -167,7 +171,7 @@ struct decode_embd_batch { static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int n_predict) { llama_tokens generated_tokens; for (int i = 0; i < n_predict; i++) { - if (i > n_predict || !g_is_generating) { + if (i > n_predict || !g_is_generating || g_is_interrupted) { printf("\n"); break; @@ -184,6 +188,11 @@ static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str()); fflush(stdout); + if (g_is_interrupted) { + printf("\n"); + } // eval the token common_batch_clear(ctx.batch); common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true); @@ -219,6 +228,9 @@ static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, std::vect text.add_special = add_bos; text.parse_special = true; mtmd_input_chunks chunks; + if (g_is_interrupted) return 0; int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps); if (res != 0) { LOG_ERR("Unable to tokenize prompt, res = %d\n", res); @@ -276,6 +288,8 @@ int main(int argc, char ** argv) { + if (g_is_interrupted) return 130; if (is_single_turn) { g_is_generating = true; if (params.prompt.find("<__image__>") == std::string::npos) { @@ -287,7 +301,7 @@ int main(int argc, char ** argv) { if (eval_message(ctx, msg, params.image, true)) { - if (generate_response(ctx, smpl, 
n_predict)) { + if (!g_is_interrupted && generate_response(ctx, smpl, n_predict)) { @@ -302,12 +316,13 @@ int main(int argc, char ** argv) { std::vector<std::string> images_fname; std::string content; - while (true) { + while (!g_is_interrupted) { LOG("\n> "); console::set_display(console::user_input); std::string line; console::readline(line, false); console::set_display(console::reset); line = string_strip(line); if (line.empty()) { @@ -335,6 +350,7 @@ int main(int argc, char ** argv) { msg.role = "user"; msg.content = content; int ret = eval_message(ctx, msg, images_fname, is_first_msg); if (ret == 2) { // non-fatal error images_fname.clear(); @@ -352,6 +368,7 @@ int main(int argc, char ** argv) { is_first_msg = false; + if (g_is_interrupted) LOG("\nInterrupted by user\n"); llama_perf_context_print(ctx.lctx); - return 0; + return g_is_interrupted ? 130 : 0;
[ "-static bool g_is_generating = false;", "+ _exit(1);", "+ break;" ]
[ 8, 22, 43 ]
{ "additions": 24, "author": "pl752", "deletions": 7, "html_url": "https://github.com/ggml-org/llama.cpp/pull/13080", "issue_id": 13080, "merged_at": "2025-04-23T21:32:35Z", "omission_probability": 0.1, "pr_number": 13080, "repo": "ggml-org/llama.cpp", "title": "llama-mtmd-cli: Sigint rework in mtmd vision example", "total_changes": 31 }
803
diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 9db5542570de8..0277e25cb5ec2 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -22,6 +22,7 @@ #include "ggml-rpc.h" #ifdef _WIN32 +# define NOMINMAX # define DIRECTORY_SEPARATOR '\\' # include <locale> # include <windows.h> @@ -37,6 +38,8 @@ #include <stdio.h> #include <vector> #include <filesystem> +#include <algorithm> +#include <thread> namespace fs = std::filesystem; @@ -150,12 +153,14 @@ struct rpc_server_params { int port = 50052; size_t backend_mem = 0; bool use_cache = false; + int n_threads = std::max(1U, std::thread::hardware_concurrency()/2); }; static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) { fprintf(stderr, "Usage: %s [options]\n\n", argv[0]); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -t, --threads number of threads for the CPU backend (default: %d)\n", params.n_threads); fprintf(stderr, " -H HOST, --host HOST host to bind to (default: %s)\n", params.host.c_str()); fprintf(stderr, " -p PORT, --port PORT port to bind to (default: %d)\n", params.port); fprintf(stderr, " -m MEM, --mem MEM backend memory size (in MB)\n"); @@ -172,6 +177,15 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & return false; } params.host = argv[i]; + } else if (arg == "-t" || arg == "--threads") { + if (++i >= argc) { + return false; + } + params.n_threads = std::stoi(argv[i]); + if (params.n_threads <= 0) { + fprintf(stderr, "error: invalid number of threads: %d\n", params.n_threads); + return false; + } } else if (arg == "-p" || arg == "--port") { if (++i >= argc) { return false; @@ -199,7 +213,7 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & return true; } -static ggml_backend_t create_backend() { +static ggml_backend_t create_backend(const rpc_server_params & params) { ggml_backend_t backend = NULL; 
#ifdef GGML_USE_CUDA fprintf(stderr, "%s: using CUDA backend\n", __func__); @@ -231,6 +245,7 @@ static ggml_backend_t create_backend() { if (!backend) { fprintf(stderr, "%s: using CPU backend\n", __func__); backend = ggml_backend_cpu_init(); + ggml_backend_cpu_set_n_threads(backend, params.n_threads); } return backend; } @@ -275,7 +290,7 @@ int main(int argc, char * argv[]) { fprintf(stderr, "\n"); } - ggml_backend_t backend = create_backend(); + ggml_backend_t backend = create_backend(params); if (!backend) { fprintf(stderr, "Failed to create backend\n"); return 1;
diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 9db5542570de8..0277e25cb5ec2 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -22,6 +22,7 @@ #include "ggml-rpc.h" #ifdef _WIN32 +# define NOMINMAX # define DIRECTORY_SEPARATOR '\\' # include <locale> # include <windows.h> @@ -37,6 +38,8 @@ #include <stdio.h> #include <vector> #include <filesystem> +#include <algorithm> +#include <thread> namespace fs = std::filesystem; @@ -150,12 +153,14 @@ struct rpc_server_params { int port = 50052; size_t backend_mem = 0; bool use_cache = false; + int n_threads = std::max(1U, std::thread::hardware_concurrency()/2); }; static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) { fprintf(stderr, "Usage: %s [options]\n\n", argv[0]); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -t, --threads number of threads for the CPU backend (default: %d)\n", params.n_threads); fprintf(stderr, " -H HOST, --host HOST host to bind to (default: %s)\n", params.host.c_str()); fprintf(stderr, " -p PORT, --port PORT port to bind to (default: %d)\n", params.port); fprintf(stderr, " -m MEM, --mem MEM backend memory size (in MB)\n"); @@ -172,6 +177,15 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & } params.host = argv[i]; + } else if (arg == "-t" || arg == "--threads") { + if (++i >= argc) { + params.n_threads = std::stoi(argv[i]); + if (params.n_threads <= 0) { + fprintf(stderr, "error: invalid number of threads: %d\n", params.n_threads); } else if (arg == "-p" || arg == "--port") { if (++i >= argc) { @@ -199,7 +213,7 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & return true; -static ggml_backend_t create_backend() { +static ggml_backend_t create_backend(const rpc_server_params & params) { ggml_backend_t backend = NULL; #ifdef GGML_USE_CUDA fprintf(stderr, "%s: using CUDA backend\n", 
__func__); @@ -231,6 +245,7 @@ static ggml_backend_t create_backend() { fprintf(stderr, "%s: using CPU backend\n", __func__); backend = ggml_backend_cpu_init(); return backend; @@ -275,7 +290,7 @@ int main(int argc, char * argv[]) { fprintf(stderr, "\n"); - ggml_backend_t backend = create_backend(); + ggml_backend_t backend = create_backend(params); fprintf(stderr, "Failed to create backend\n"); return 1;
[ "+ ggml_backend_cpu_set_n_threads(backend, params.n_threads);" ]
[ 65 ]
{ "additions": 17, "author": "rgerganov", "deletions": 2, "html_url": "https://github.com/ggml-org/llama.cpp/pull/13060", "issue_id": 13060, "merged_at": "2025-04-23T07:32:49Z", "omission_probability": 0.1, "pr_number": 13060, "repo": "ggml-org/llama.cpp", "title": "rpc : add command line option for number of threads for the CPU backend", "total_changes": 19 }
804
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index d33f843b417cf..1360a0df594f3 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -107,6 +107,7 @@ message(DEBUG "INS_ENB : ${INS_ENB}") option(GGML_CPU_HBM "ggml: use memkind for CPU HBM" OFF) option(GGML_CPU_AARCH64 "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON) option(GGML_CPU_KLEIDIAI "ggml: use KleidiAI optimized kernels if applicable" OFF) +option(GGML_SSE42 "ggml: enable SSE 4.2" ${INS_ENB}) option(GGML_AVX "ggml: enable AVX" ${INS_ENB}) option(GGML_AVX_VNNI "ggml: enable AVX-VNNI" OFF) option(GGML_AVX2 "ggml: enable AVX2" ${INS_ENB}) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index f00700da71fcd..43d9fc4fe25e0 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -267,6 +267,7 @@ function(ggml_add_cpu_backend_variant tag_name) set(GGML_CPU_TAG_NAME ${tag_name}) # other: OPENMP LLAMAFILE CPU_HBM foreach (feat NATIVE + SSE42 AVX AVX2 BMI2 AVX_VNNI FMA F16C AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8 AMX_BF16) @@ -286,14 +287,16 @@ if (GGML_CPU_ALL_VARIANTS) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") endif() - ggml_add_cpu_backend_variant(sandybridge AVX) - ggml_add_cpu_backend_variant(haswell AVX F16C AVX2 BMI2 FMA) - ggml_add_cpu_backend_variant(skylakex AVX F16C AVX2 BMI2 FMA AVX512) - ggml_add_cpu_backend_variant(icelake AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) - ggml_add_cpu_backend_variant(alderlake AVX F16C AVX2 BMI2 FMA AVX_VNNI) + ggml_add_cpu_backend_variant(x64) + ggml_add_cpu_backend_variant(sse42 SSE42) + ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) + ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) + ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) + ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + ggml_add_cpu_backend_variant(alderlake SSE42 AVX 
F16C AVX2 BMI2 FMA AVX_VNNI) if (NOT MSVC) # MSVC doesn't support AMX - ggml_add_cpu_backend_variant(sapphirerapids AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) + ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index e73a3b69b5da2..6a652738c10a9 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -222,7 +222,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) elseif (GGML_AVX) list(APPEND ARCH_FLAGS /arch:AVX) list(APPEND ARCH_DEFINITIONS GGML_AVX) - else () + elseif (GGML_SSE42) list(APPEND ARCH_FLAGS /arch:SSE4.2) list(APPEND ARCH_DEFINITIONS GGML_SSE42) endif() @@ -237,8 +237,10 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_NATIVE) list(APPEND ARCH_FLAGS -march=native) else () - list(APPEND ARCH_FLAGS -msse4.2) - list(APPEND ARCH_DEFINITIONS GGML_SSE42) + if (GGML_SSE42) + list(APPEND ARCH_FLAGS -msse4.2) + list(APPEND ARCH_DEFINITIONS GGML_SSE42) + endif() if (GGML_F16C) list(APPEND ARCH_FLAGS -mf16c) list(APPEND ARCH_DEFINITIONS GGML_F16C) diff --git a/ggml/src/ggml-cpu/cpu-feats-x86.cpp b/ggml/src/ggml-cpu/cpu-feats-x86.cpp index 902ee4346660c..d775a0363858d 100644 --- a/ggml/src/ggml-cpu/cpu-feats-x86.cpp +++ b/ggml/src/ggml-cpu/cpu-feats-x86.cpp @@ -263,7 +263,7 @@ void test_x86_is() { static int ggml_backend_cpu_x86_score() { // FIXME: this does not check for OS support - int score = 0; + int score = 1; cpuid_x86 is; #ifdef GGML_FMA
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index d33f843b417cf..1360a0df594f3 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -107,6 +107,7 @@ message(DEBUG "INS_ENB : ${INS_ENB}") option(GGML_CPU_HBM "ggml: use memkind for CPU HBM" OFF) option(GGML_CPU_AARCH64 "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON) option(GGML_CPU_KLEIDIAI "ggml: use KleidiAI optimized kernels if applicable" OFF) +option(GGML_SSE42 "ggml: enable SSE 4.2" ${INS_ENB}) option(GGML_AVX "ggml: enable AVX" ${INS_ENB}) option(GGML_AVX_VNNI "ggml: enable AVX-VNNI" OFF) option(GGML_AVX2 "ggml: enable AVX2" ${INS_ENB}) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index f00700da71fcd..43d9fc4fe25e0 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -267,6 +267,7 @@ function(ggml_add_cpu_backend_variant tag_name) set(GGML_CPU_TAG_NAME ${tag_name}) # other: OPENMP LLAMAFILE CPU_HBM foreach (feat NATIVE + SSE42 AVX AVX2 BMI2 AVX_VNNI FMA F16C AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8 AMX_BF16) @@ -286,14 +287,16 @@ if (GGML_CPU_ALL_VARIANTS) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") - ggml_add_cpu_backend_variant(haswell AVX F16C AVX2 BMI2 FMA) - ggml_add_cpu_backend_variant(skylakex AVX F16C AVX2 BMI2 FMA AVX512) - ggml_add_cpu_backend_variant(icelake AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) - ggml_add_cpu_backend_variant(alderlake AVX F16C AVX2 BMI2 FMA AVX_VNNI) + ggml_add_cpu_backend_variant(x64) + ggml_add_cpu_backend_variant(sse42 SSE42) + ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) + ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) + ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) if (NOT MSVC) # MSVC doesn't support AMX - ggml_add_cpu_backend_variant(sapphirerapids AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) + 
ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index e73a3b69b5da2..6a652738c10a9 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -222,7 +222,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) elseif (GGML_AVX) list(APPEND ARCH_FLAGS /arch:AVX) list(APPEND ARCH_DEFINITIONS GGML_AVX) - else () + elseif (GGML_SSE42) list(APPEND ARCH_FLAGS /arch:SSE4.2) list(APPEND ARCH_DEFINITIONS GGML_SSE42) endif() @@ -237,8 +237,10 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_NATIVE) list(APPEND ARCH_FLAGS -march=native) else () - list(APPEND ARCH_FLAGS -msse4.2) - list(APPEND ARCH_DEFINITIONS GGML_SSE42) + if (GGML_SSE42) + list(APPEND ARCH_FLAGS -msse4.2) if (GGML_F16C) list(APPEND ARCH_FLAGS -mf16c) list(APPEND ARCH_DEFINITIONS GGML_F16C) diff --git a/ggml/src/ggml-cpu/cpu-feats-x86.cpp b/ggml/src/ggml-cpu/cpu-feats-x86.cpp index 902ee4346660c..d775a0363858d 100644 --- a/ggml/src/ggml-cpu/cpu-feats-x86.cpp +++ b/ggml/src/ggml-cpu/cpu-feats-x86.cpp @@ -263,7 +263,7 @@ void test_x86_is() { static int ggml_backend_cpu_x86_score() { // FIXME: this does not check for OS support - int score = 0; + int score = 1; cpuid_x86 is; #ifdef GGML_FMA
[ "- ggml_add_cpu_backend_variant(sandybridge AVX)", "+ ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI)", "+ ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI)", "+ list(APPEND ARCH_DEFINITIONS GGML_SSE42)", "+ endif()" ]
[ 28, 38, 39, 68, 69 ]
{ "additions": 16, "author": "slaren", "deletions": 10, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12871", "issue_id": 12871, "merged_at": "2025-04-21T16:13:52Z", "omission_probability": 0.1, "pr_number": 12871, "repo": "ggml-org/llama.cpp", "title": "ggml : add SSE 4.2 and x64 base variant for CPUs without AVX", "total_changes": 26 }
805
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index a4468b1698722..0355311dc5c06 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -9,6 +9,7 @@ #include <fstream> #include <cmath> #include <cctype> +#include <algorithm> struct quant_option { std::string name; @@ -16,7 +17,7 @@ struct quant_option { std::string desc; }; -static const std::vector<struct quant_option> QUANT_OPTIONS = { +static const std::vector<quant_option> QUANT_OPTIONS = { { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", }, { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", }, { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", }, @@ -105,7 +106,8 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp // [[noreturn]] static void usage(const char * executable) { - printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable); + printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable); + printf(" [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n"); printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n"); printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. 
Increases model size but may also increase quality, especially when requantizing\n"); printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n"); @@ -114,6 +116,8 @@ static void usage(const char * executable) { printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n"); printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n"); printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n"); + printf(" --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n"); + printf(" Advanced option to selectively quantize tensors. May be specified multiple times.\n"); printf(" --keep-split: will generate quantized model in the same shards as input\n"); printf(" --override-kv KEY=TYPE:VALUE\n"); printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n"); @@ -244,6 +248,107 @@ static ggml_type parse_ggml_type(const char * arg) { return GGML_TYPE_COUNT; } +// Allowed tensors for arbitrary quantization with --tensor-type option +static const std::vector<std::string> ALLOWED_TENSOR_TYPE = { + "attn_k", + "attn_kv_a_mqa", + "attn_kv_b", + "attn_o", + "attn_output", + "attn_q", + "attn_q_a", + "attn_q_b", + "attn_qkv", + "attn_v", + "channel_mix_key", + "channel_mix_receptance", + "channel_mix_value", + "cls", + "cls.output", + "cross_attn_k", + "cross_attn_o", + "cross_attn_q", + "cross_attn_v", + "ffn_act", + "ffn_down", + "ffn_down_exps", + "ffn_down_shexp", + "ffn_gate", + "ffn_gate_exps", + "ffn_gate_shexp", + "ffn_up", + "ffn_up_exps", + "ffn_up_shexp", + "ssm_in", + "ssm_out", + "time_mix_gate", + "time_mix_key", + "time_mix_output", + "time_mix_receptance", + "time_mix_value", +}; + +// changes to this struct must be replicated in llama-quant.cpp +struct tensor_quantization { + std::string name; + ggml_type quant = GGML_TYPE_COUNT; +}; + 
+static bool parse_tensor_type(const char * data, std::vector<tensor_quantization> & tensor_type) { + const char * sep = strchr(data, '='); + if (sep == nullptr) { + printf("\n%s: malformed tensor type '%s'\n\n", __func__, data); + return false; + } + + const size_t tn_len = sep - data; + if (tn_len == 0) { + printf("\n%s: missing tensor name\n\n", __func__); + return false; + } + + if (const size_t qt_len = strlen(sep); qt_len == 1) { + printf("\n%s: missing quantization type\n\n", __func__); + return false; + } + + std::string tn(data, tn_len); + std::transform(tn.begin(), tn.end(), tn.begin(), tolower); + sep++; + const std::string qt(sep); + + bool found = false; + for (const auto & allowed : ALLOWED_TENSOR_TYPE) { + std::string tensor; + tensor = tn.rfind('.') != std::string::npos ? tn.substr(tn.rfind('.') + 1) : tn; + // handle special case of cls.output + std::string cls_output = "cls.output"; + if (tn.find(cls_output) != std::string::npos) { + tensor = "cls.output"; + } + // check if an allowed tensor exists and it's at the end of the kv string + if (tensor == allowed) { + found = true; + break; + } + } + if (!found) { + printf("\n%s: invalid tensor name '%s'\n\n", __func__, tn.c_str()); + return false; + } + + if (parse_ggml_type(qt.c_str()) == GGML_TYPE_COUNT) { + printf("\n%s: invalid quantization type '%s'\n\n", __func__, qt.c_str()); + return false; + } + + tensor_quantization tqz; + tqz.name = tn; + tqz.quant = parse_ggml_type(qt.c_str()); + tensor_type.emplace_back(std::move(tqz)); + return true; +} + int main(int argc, char ** argv) { if (argc < 3) { usage(argv[0]); @@ -255,6 +360,7 @@ int main(int argc, char ** argv) { std::string imatrix_file; std::vector<std::string> included_weights, excluded_weights; std::vector<llama_model_kv_override> kv_overrides; + std::vector<tensor_quantization> tensor_types; for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) { if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) { @@ -277,6 
+383,10 @@ int main(int argc, char ** argv) { } else { usage(argv[0]); } + } else if (strcmp(argv[arg_idx], "--tensor-type") == 0) { + if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) { + usage(argv[0]); + } } else if (strcmp(argv[arg_idx], "--override-kv") == 0) { if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) { usage(argv[0]); @@ -361,6 +471,9 @@ int main(int argc, char ** argv) { kv_overrides.back().key[0] = 0; params.kv_overrides = &kv_overrides; } + if (!tensor_types.empty()) { + params.tensor_types = &tensor_types; + } llama_backend_init(); diff --git a/include/llama.h b/include/llama.h index fca2b034ba270..c102525b17b8c 100644 --- a/include/llama.h +++ b/include/llama.h @@ -366,17 +366,18 @@ extern "C" { // model quantization parameters typedef struct llama_model_quantize_params { - int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() - enum llama_ftype ftype; // quantize to this llama_ftype - enum ggml_type output_tensor_type; // output tensor type - enum ggml_type token_embedding_type; // token embeddings tensor type - bool allow_requantize; // allow quantizing non-f32/f16 tensors - bool quantize_output_tensor; // quantize output.weight - bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored - bool pure; // quantize all tensors to the default type - bool keep_split; // quantize to the same number of shards - void * imatrix; // pointer to importance matrix data - void * kv_overrides; // pointer to vector containing overrides + int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() + enum llama_ftype ftype; // quantize to this llama_ftype + enum ggml_type output_tensor_type; // output tensor type + enum ggml_type token_embedding_type; // token embeddings tensor type + bool allow_requantize; // allow quantizing non-f32/f16 tensors + 
bool quantize_output_tensor; // quantize output.weight + bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored + bool pure; // quantize all tensors to the default type + bool keep_split; // quantize to the same number of shards + void * imatrix; // pointer to importance matrix data + void * kv_overrides; // pointer to vector containing overrides + void * tensor_types; // pointer to vector containing tensor types } llama_model_quantize_params; typedef struct llama_logit_bias { diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index e3e10fa6cf77f..7dc5422763118 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -10,6 +10,7 @@ #include <cinttypes> #include <fstream> #include <mutex> +#include <regex> #include <thread> #include <unordered_map> @@ -47,8 +48,14 @@ struct quantize_state_impl { {} }; +// changes to this struct must be replicated in quantize.cpp +struct tensor_quantization { + std::string name; + ggml_type quant = GGML_TYPE_COUNT; +}; + static void llama_tensor_dequantize_impl( - struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers, + ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers, const size_t nelements, const int nthread ) { if (output.size() < nelements) { @@ -536,7 +543,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: model.load_hparams(ml); model.load_stats (ml); - struct quantize_state_impl qs(model, params); + quantize_state_impl qs(model, params); if (params->only_copy) { ftype = ml.ftype; @@ -661,7 +668,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // populate the original tensors so we get an initial meta data for (const auto * it : tensors) { uint16_t i_split = params->keep_split ? 
it->idx : 0; - struct ggml_tensor * tensor = it->tensor; + ggml_tensor * tensor = it->tensor; if (!ctx_outs[i_split]) { ctx_outs[i_split].reset(gguf_init_empty()); } @@ -710,7 +717,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: new_ofstream(0); for (const auto * it : tensors) { const auto & weight = *it; - struct ggml_tensor * tensor = weight.tensor; + ggml_tensor * tensor = weight.tensor; if (weight.idx != cur_split && params->keep_split) { close_ofstream(); new_ofstream(weight.idx); @@ -776,7 +783,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // do not quantize relative position bias (T5) quantize &= name.find("attn_rel_b.weight") == std::string::npos; - enum ggml_type new_type; + ggml_type new_type; void * new_data; size_t new_size; @@ -786,6 +793,19 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // get more optimal quantization type based on the tensor shape, layer, etc. 
if (!params->pure && ggml_is_quantized(default_type)) { new_type = llama_tensor_get_type(qs, new_type, tensor, ftype); + // unless the user specifies a type + if (params->tensor_types) { + const std::vector<tensor_quantization> & tensor_types = *static_cast<const std::vector<tensor_quantization> *>(params->tensor_types); + for (const auto & [tname, qtype] : tensor_types) { + if (std::regex pattern(tname); std::regex_search(tensor->name, pattern)) { + if (qtype != new_type) { + LLAMA_LOG_DEBUG("(overriding %s -> %s), ", ggml_type_name(new_type), ggml_type_name(qtype)); + } + new_type = qtype; + break; + } + } + } } if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) { new_type = params->token_embedding_type; @@ -910,8 +930,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // interface implementation // -struct llama_model_quantize_params llama_model_quantize_default_params() { - struct llama_model_quantize_params result = { +llama_model_quantize_params llama_model_quantize_default_params() { + llama_model_quantize_params result = { /*.nthread =*/ 0, /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1, /*.output_tensor_type =*/ GGML_TYPE_COUNT, @@ -923,6 +943,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() { /*.keep_split =*/ false, /*.imatrix =*/ nullptr, /*.kv_overrides =*/ nullptr, + /*.tensor_type =*/ nullptr, }; return result;
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index a4468b1698722..0355311dc5c06 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -9,6 +9,7 @@ #include <cmath> #include <cctype> +#include <algorithm> struct quant_option { std::string name; @@ -16,7 +17,7 @@ struct quant_option { std::string desc; -static const std::vector<struct quant_option> QUANT_OPTIONS = { +static const std::vector<quant_option> QUANT_OPTIONS = { { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", }, { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", }, { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", }, @@ -105,7 +106,8 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp [[noreturn]] static void usage(const char * executable) { - printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable); + printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable); + printf(" [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n"); printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n"); printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. 
Increases model size but may also increase quality, especially when requantizing\n"); printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n"); @@ -114,6 +116,8 @@ static void usage(const char * executable) { printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n"); printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n"); printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n"); + printf(" --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n"); printf(" --keep-split: will generate quantized model in the same shards as input\n"); printf(" --override-kv KEY=TYPE:VALUE\n"); printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n"); @@ -244,6 +248,107 @@ static ggml_type parse_ggml_type(const char * arg) { return GGML_TYPE_COUNT; } +// Allowed tensors for arbitrary quantization with --tensor-type option +static const std::vector<std::string> ALLOWED_TENSOR_TYPE = { + "attn_k", + "attn_kv_b", + "attn_o", + "attn_output", + "attn_q", + "attn_q_a", + "attn_qkv", + "attn_v", + "channel_mix_key", + "channel_mix_receptance", + "channel_mix_value", + "cls", + "cls.output", + "cross_attn_o", + "cross_attn_q", + "cross_attn_v", + "ffn_act", + "ffn_down_exps", + "ffn_down_shexp", + "ffn_gate_exps", + "ffn_gate_shexp", + "ffn_up", + "ffn_up_exps", + "ssm_in", + "ssm_out", + "time_mix_gate", + "time_mix_key", + "time_mix_output", + "time_mix_receptance", +static bool parse_tensor_type(const char * data, std::vector<tensor_quantization> & tensor_type) { + const char * sep = strchr(data, '='); + if (sep == nullptr) { + printf("\n%s: malformed tensor type '%s'\n\n", __func__, data); + const size_t tn_len = sep - data; + printf("\n%s: missing tensor name\n\n", __func__); + if (const size_t qt_len = strlen(sep); qt_len == 1) { + 
printf("\n%s: missing quantization type\n\n", __func__); + std::string tn(data, tn_len); + std::transform(tn.begin(), tn.end(), tn.begin(), tolower); + sep++; + const std::string qt(sep); + for (const auto & allowed : ALLOWED_TENSOR_TYPE) { + std::string tensor; + tensor = tn.rfind('.') != std::string::npos ? tn.substr(tn.rfind('.') + 1) : tn; + // handle special case of cls.output + std::string cls_output = "cls.output"; + if (tn.find(cls_output) != std::string::npos) { + tensor = "cls.output"; + if (tensor == allowed) { + found = true; + if (!found) { + printf("\n%s: invalid tensor name '%s'\n\n", __func__, tn.c_str()); + if (parse_ggml_type(qt.c_str()) == GGML_TYPE_COUNT) { + printf("\n%s: invalid quantization type '%s'\n\n", __func__, qt.c_str()); + tensor_quantization tqz; + tqz.name = tn; + tqz.quant = parse_ggml_type(qt.c_str()); + tensor_type.emplace_back(std::move(tqz)); + return true; +} int main(int argc, char ** argv) { if (argc < 3) { usage(argv[0]); @@ -255,6 +360,7 @@ int main(int argc, char ** argv) { std::string imatrix_file; std::vector<std::string> included_weights, excluded_weights; std::vector<llama_model_kv_override> kv_overrides; + std::vector<tensor_quantization> tensor_types; for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) { if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) { @@ -277,6 +383,10 @@ int main(int argc, char ** argv) { } else { + } else if (strcmp(argv[arg_idx], "--tensor-type") == 0) { + if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) { + usage(argv[0]); + } } else if (strcmp(argv[arg_idx], "--override-kv") == 0) { if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) { @@ -361,6 +471,9 @@ int main(int argc, char ** argv) { kv_overrides.back().key[0] = 0; params.kv_overrides = &kv_overrides; } + if (!tensor_types.empty()) { + params.tensor_types = &tensor_types; llama_backend_init(); diff --git a/include/llama.h b/include/llama.h index 
fca2b034ba270..c102525b17b8c 100644 --- a/include/llama.h +++ b/include/llama.h @@ -366,17 +366,18 @@ extern "C" { // model quantization parameters typedef struct llama_model_quantize_params { - int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() - enum llama_ftype ftype; // quantize to this llama_ftype - enum ggml_type output_tensor_type; // output tensor type - enum ggml_type token_embedding_type; // token embeddings tensor type - bool allow_requantize; // allow quantizing non-f32/f16 tensors - bool quantize_output_tensor; // quantize output.weight - bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored - bool pure; // quantize all tensors to the default type - bool keep_split; // quantize to the same number of shards - void * imatrix; // pointer to importance matrix data - void * kv_overrides; // pointer to vector containing overrides + int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() + enum llama_ftype ftype; // quantize to this llama_ftype + enum ggml_type output_tensor_type; // output tensor type + enum ggml_type token_embedding_type; // token embeddings tensor type + bool allow_requantize; // allow quantizing non-f32/f16 tensors + bool quantize_output_tensor; // quantize output.weight + bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored + bool pure; // quantize all tensors to the default type + bool keep_split; // quantize to the same number of shards + void * imatrix; // pointer to importance matrix data + void * kv_overrides; // pointer to vector containing overrides + void * tensor_types; // pointer to vector containing tensor types } llama_model_quantize_params; typedef struct llama_logit_bias { diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index e3e10fa6cf77f..7dc5422763118 100644 --- a/src/llama-quant.cpp +++ 
b/src/llama-quant.cpp @@ -10,6 +10,7 @@ #include <cinttypes> #include <mutex> +#include <regex> #include <thread> #include <unordered_map> @@ -47,8 +48,14 @@ struct quantize_state_impl { {} +// changes to this struct must be replicated in quantize.cpp static void llama_tensor_dequantize_impl( - struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers, const size_t nelements, const int nthread ) { if (output.size() < nelements) { @@ -536,7 +543,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: model.load_hparams(ml); model.load_stats (ml); - struct quantize_state_impl qs(model, params); + quantize_state_impl qs(model, params); if (params->only_copy) { ftype = ml.ftype; @@ -661,7 +668,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // populate the original tensors so we get an initial meta data uint16_t i_split = params->keep_split ? it->idx : 0; - struct ggml_tensor * tensor = it->tensor; + ggml_tensor * tensor = it->tensor; if (!ctx_outs[i_split]) { ctx_outs[i_split].reset(gguf_init_empty()); } @@ -710,7 +717,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: new_ofstream(0); const auto & weight = *it; - struct ggml_tensor * tensor = weight.tensor; + ggml_tensor * tensor = weight.tensor; if (weight.idx != cur_split && params->keep_split) { close_ofstream(); new_ofstream(weight.idx); @@ -776,7 +783,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // do not quantize relative position bias (T5) quantize &= name.find("attn_rel_b.weight") == std::string::npos; + ggml_type new_type; void * new_data; size_t new_size; @@ -786,6 +793,19 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // get more optimal quantization type based on the tensor shape, layer, etc. 
if (!params->pure && ggml_is_quantized(default_type)) { new_type = llama_tensor_get_type(qs, new_type, tensor, ftype); + // unless the user specifies a type + if (params->tensor_types) { + const std::vector<tensor_quantization> & tensor_types = *static_cast<const std::vector<tensor_quantization> *>(params->tensor_types); + for (const auto & [tname, qtype] : tensor_types) { + if (std::regex pattern(tname); std::regex_search(tensor->name, pattern)) { + if (qtype != new_type) { + LLAMA_LOG_DEBUG("(overriding %s -> %s), ", ggml_type_name(new_type), ggml_type_name(qtype)); + } + new_type = qtype; + break; + } + } + } if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) { new_type = params->token_embedding_type; @@ -910,8 +930,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // interface implementation -struct llama_model_quantize_params llama_model_quantize_default_params() { - struct llama_model_quantize_params result = { +llama_model_quantize_params llama_model_quantize_default_params() { + llama_model_quantize_params result = { /*.nthread =*/ 0, /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1, /*.output_tensor_type =*/ GGML_TYPE_COUNT, @@ -923,6 +943,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() { /*.keep_split =*/ false, /*.imatrix =*/ nullptr, /*.kv_overrides =*/ nullptr, + /*.tensor_type =*/ nullptr, }; return result;
[ "+ printf(\" Advanced option to selectively quantize tensors. May be specified multiple times.\\n\");", "+ \"attn_kv_a_mqa\",", "+ \"attn_q_b\",", "+ \"cross_attn_k\",", "+ \"ffn_down\",", "+ \"ffn_gate\",", "+ \"ffn_up_shexp\",", "+ \"time_mix_value\",", "+// changes to this struct must be replicated in llama-quant.cpp", "+ if (tn_len == 0) {", "+ bool found = false;", "+ // check if an allowed tensor exists and it's at the end of the kv string", "+ break;", "+ ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,", "- enum ggml_type new_type;" ]
[ 36, 47, 53, 61, 66, 69, 74, 81, 84, 98, 113, 122, 125, 235, 270 ]
{ "additions": 155, "author": "EAddario", "deletions": 20, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12511", "issue_id": 12511, "merged_at": "2025-04-13T18:29:28Z", "omission_probability": 0.1, "pr_number": 12511, "repo": "ggml-org/llama.cpp", "title": "quantize: Handle user-defined quantization levels for additional tensors", "total_changes": 175 }
806
diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 85f3ae7bfdc31..266d8af4693c2 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -481,6 +481,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_SQRT, GGML_METAL_KERNEL_TYPE_SIN, GGML_METAL_KERNEL_TYPE_COS, + GGML_METAL_KERNEL_TYPE_NEG, GGML_METAL_KERNEL_TYPE_SUM_ROWS, GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, @@ -1159,6 +1160,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQRT, sqrt, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIN, sin, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, pool_2d_avg_f32, true); @@ -1320,6 +1322,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_ELU: + case GGML_UNARY_OP_NEG: return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; default: return false; @@ -2010,6 +2013,18 @@ static void ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; + case GGML_UNARY_OP_NEG: + { + id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NEG].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; default: { 
GGML_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(dst->op)); diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index dc7eab03ee8a2..8d6e99e621e9e 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -949,6 +949,13 @@ kernel void kernel_cos( dst[tpig] = cos(src0[tpig]); } +kernel void kernel_neg( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = -src0[tpig]; +} + kernel void kernel_sum_rows( device const float * src0, device float * dst,
diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 85f3ae7bfdc31..266d8af4693c2 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -481,6 +481,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_SQRT, GGML_METAL_KERNEL_TYPE_SIN, GGML_METAL_KERNEL_TYPE_COS, + GGML_METAL_KERNEL_TYPE_NEG, GGML_METAL_KERNEL_TYPE_SUM_ROWS, GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, @@ -1159,6 +1160,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQRT, sqrt, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIN, sin, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, pool_2d_avg_f32, true); @@ -1320,6 +1322,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_ELU: return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; return false; @@ -2010,6 +2013,18 @@ static void ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; + { + id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NEG].pipeline; + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + const int64_t n = ggml_nelements(dst); + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; { GGML_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(dst->op)); diff 
--git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index dc7eab03ee8a2..8d6e99e621e9e 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -949,6 +949,13 @@ kernel void kernel_cos( dst[tpig] = cos(src0[tpig]); } +kernel void kernel_neg( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = -src0[tpig]; +} kernel void kernel_sum_rows( device const float * src0, device float * dst,
[ "+ } break;" ]
[ 43 ]
{ "additions": 22, "author": "jmorganca", "deletions": 0, "html_url": "https://github.com/ggml-org/llama.cpp/pull/13029", "issue_id": 13029, "merged_at": "2025-04-20T05:28:40Z", "omission_probability": 0.1, "pr_number": 13029, "repo": "ggml-org/llama.cpp", "title": "metal: add neg operator", "total_changes": 22 }
807
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 2f6d03c939eb5..39f3cd343ac45 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -2397,7 +2397,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_unary_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_rms_norm_back_f32, "rms_norm_back_f32", rms_norm_back_f32_len, rms_norm_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_l2_norm_f32, "l2_norm_f32", l2_norm_f32_len, l2_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); @@ -6006,6 +6006,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) { case GGML_OP_REPEAT: case GGML_OP_REPEAT_BACK: case GGML_OP_ROPE: + case GGML_OP_RMS_NORM: return true; default: return false; @@ -6216,7 +6217,6 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co switch (op) { case GGML_OP_NORM: - case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: case GGML_OP_L2_NORM: case GGML_OP_SOFT_MAX: @@ -6233,6 +6233,10 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co elements = { nr, 1, 1 }; } } break; + case GGML_OP_RMS_NORM: + elements = { 
(uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne03 }; + break; + case GGML_OP_SUM: // We use GGML_OP_SUM_ROWS with 1 row. elements = { 1, 1, 1 }; @@ -6883,7 +6887,17 @@ static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) { float * op_params = (float *)dst->op_params; - ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun); + const uint32_t src0_type_size = ggml_type_size(src0->type); + const uint32_t dst_type_size = ggml_type_size(dst->type); + + ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { + (uint32_t)ggml_nelements(src0), + (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size, + (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size, + 0, + op_params[0], 0.0f, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, dryrun); } static void ggml_vk_rms_norm_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) { @@ -9388,10 +9402,10 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: + case GGML_OP_RMS_NORM: return true; case GGML_OP_NORM: case GGML_OP_GROUP_NORM: - case GGML_OP_RMS_NORM: case GGML_OP_L2_NORM: return ggml_is_contiguous(op->src[0]); case 
GGML_OP_ADD: diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp index b554400ba393f..deb8ee9960f58 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp @@ -1,6 +1,6 @@ #version 450 -#include "generic_head.comp" +#include "generic_unary_head.comp" #include "types.comp" #extension GL_EXT_control_flow_attributes : enable @@ -8,19 +8,29 @@ layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; -layout (binding = 0) readonly buffer X {A_TYPE data_a[];}; -layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; - shared FLOAT_TYPE sum[BLOCK_SIZE]; void main() { - const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x; - const uint tid = gl_LocalInvocationID.x; + const uint ncols = p.ne00; + const uint nrows = gl_NumWorkGroups.x; + const uint nchannels = gl_NumWorkGroups.y; + + const uint row = gl_WorkGroupID.x; + const uint channel = gl_WorkGroupID.y; + const uint samp = gl_WorkGroupID.z; + const uint tid = gl_LocalInvocationID.x; + + const uint stride_row = p.nb01; + const uint stride_channel = p.nb02; + const uint stride_sample = p.nb03; + + uint32_t a_offset = samp*stride_sample + channel*stride_channel + row*stride_row + get_aoffset(); + uint32_t d_offset = ((samp*nchannels + channel)*nrows + row)*ncols + get_doffset(); sum[tid] = FLOAT_TYPE(0.0f); // partial sum for thread in warp - [[unroll]] for (uint col = tid; col < p.KX; col += BLOCK_SIZE) { - const FLOAT_TYPE xi = FLOAT_TYPE(data_a[row*p.KX + col]); + [[unroll]] for (uint col = tid; col < ncols; col += BLOCK_SIZE) { + const FLOAT_TYPE xi = FLOAT_TYPE(data_a[a_offset + col]); sum[tid] += xi * xi; } @@ -33,10 +43,10 @@ void main() { barrier(); } - const FLOAT_TYPE mean = sum[0] / FLOAT_TYPE(p.KX); + const FLOAT_TYPE mean = sum[0] / FLOAT_TYPE(ncols); const FLOAT_TYPE scale = inversesqrt(mean + FLOAT_TYPE(p.param1)); - [[unroll]] 
for (uint col = tid; col < p.KX; col += BLOCK_SIZE) { - data_d[row*p.KX + col] = D_TYPE(scale * FLOAT_TYPE(data_a[row*p.KX + col])); + [[unroll]] for (uint col = tid; col < ncols; col += BLOCK_SIZE) { + data_d[d_offset + col] = D_TYPE(scale * FLOAT_TYPE(data_a[a_offset + col])); } }
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 2f6d03c939eb5..39f3cd343ac45 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -2397,7 +2397,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_norm_f32, "norm_f32", norm_f32_len, norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_group_norm_f32, "group_norm_f32", group_norm_f32_len, group_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, "rms_norm_f32", rms_norm_f32_len, rms_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_rms_norm_back_f32, "rms_norm_back_f32", rms_norm_back_f32_len, rms_norm_back_f32_data, "main", 3, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_l2_norm_f32, "l2_norm_f32", l2_norm_f32_len, l2_norm_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, {}, 1); @@ -6006,6 +6006,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) { case GGML_OP_REPEAT: case GGML_OP_REPEAT_BACK: case GGML_OP_ROPE: return true; default: return false; @@ -6216,7 +6217,6 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co switch (op) { case GGML_OP_NORM: - case GGML_OP_RMS_NORM: case GGML_OP_RMS_NORM_BACK: case GGML_OP_L2_NORM: case GGML_OP_SOFT_MAX: @@ -6233,6 +6233,10 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co elements = { nr, 1, 1 }; } } break; + elements = { (uint32_t)ne01, (uint32_t)ne02, (uint32_t)ne03 }; case GGML_OP_SUM: // We use GGML_OP_SUM_ROWS with 1 row. 
elements = { 1, 1, 1 }; @@ -6883,7 +6887,17 @@ static void ggml_vk_group_norm(ggml_backend_vk_context * ctx, vk_context& subctx static void ggml_vk_rms_norm(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, ggml_tensor * dst, bool dryrun = false) { float * op_params = (float *)dst->op_params; - ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0], 0.0f }, dryrun); + const uint32_t src0_type_size = ggml_type_size(src0->type); + const uint32_t dst_type_size = ggml_type_size(dst->type); + ggml_vk_op_f32<vk_op_unary_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_RMS_NORM, { + (uint32_t)ggml_nelements(src0), + (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size, + (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size, + 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, dryrun); static void ggml_vk_rms_norm_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) { @@ -9388,10 +9402,10 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: + case GGML_OP_RMS_NORM: return true; case GGML_OP_NORM: case GGML_OP_GROUP_NORM: - case GGML_OP_RMS_NORM: case GGML_OP_L2_NORM: return ggml_is_contiguous(op->src[0]); case GGML_OP_ADD: diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp index 
b554400ba393f..deb8ee9960f58 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/rms_norm.comp @@ -1,6 +1,6 @@ #version 450 -#include "generic_head.comp" +#include "generic_unary_head.comp" #include "types.comp" #extension GL_EXT_control_flow_attributes : enable @@ -8,19 +8,29 @@ layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; -layout (binding = 0) readonly buffer X {A_TYPE data_a[];}; -layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; - shared FLOAT_TYPE sum[BLOCK_SIZE]; void main() { - const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x; - const uint tid = gl_LocalInvocationID.x; + const uint ncols = p.ne00; + const uint nrows = gl_NumWorkGroups.x; + const uint nchannels = gl_NumWorkGroups.y; + const uint channel = gl_WorkGroupID.y; + const uint samp = gl_WorkGroupID.z; + const uint tid = gl_LocalInvocationID.x; + const uint stride_row = p.nb01; + const uint stride_channel = p.nb02; + const uint stride_sample = p.nb03; + uint32_t d_offset = ((samp*nchannels + channel)*nrows + row)*ncols + get_doffset(); sum[tid] = FLOAT_TYPE(0.0f); // partial sum for thread in warp - const FLOAT_TYPE xi = FLOAT_TYPE(data_a[row*p.KX + col]); + const FLOAT_TYPE xi = FLOAT_TYPE(data_a[a_offset + col]); sum[tid] += xi * xi; @@ -33,10 +43,10 @@ void main() { barrier(); - const FLOAT_TYPE mean = sum[0] / FLOAT_TYPE(p.KX); + const FLOAT_TYPE mean = sum[0] / FLOAT_TYPE(ncols); const FLOAT_TYPE scale = inversesqrt(mean + FLOAT_TYPE(p.param1)); - data_d[row*p.KX + col] = D_TYPE(scale * FLOAT_TYPE(data_a[row*p.KX + col])); + data_d[d_offset + col] = D_TYPE(scale * FLOAT_TYPE(data_a[a_offset + col]));
[ "+ ggml_vk_create_pipeline(device, device->pipeline_rms_norm_f32, \"rms_norm_f32\", rms_norm_f32_len, rms_norm_f32_data, \"main\", 2, sizeof(vk_op_unary_push_constants), {1, 1, 1}, {}, 1);", "+ break;", "+ op_params[0], 0.0f,", "+ const uint row = gl_WorkGroupID.x;", "+ uint32_t a_offset = samp*stride_sample + channel*stride_channel + row*stride_row + get_aoffset();" ]
[ 9, 35, 53, 99, 108 ]
{ "additions": 39, "author": "jeffbolznv", "deletions": 15, "html_url": "https://github.com/ggml-org/llama.cpp/pull/13031", "issue_id": 13031, "merged_at": "2025-04-20T08:50:02Z", "omission_probability": 0.1, "pr_number": 13031, "repo": "ggml-org/llama.cpp", "title": "vulkan: support noncontiguous rms_norm", "total_changes": 54 }
808
diff --git a/examples/main/main.cpp b/examples/main/main.cpp index fd7410a646c69..c59b941bf5e47 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -865,9 +865,22 @@ int main(int argc, char ** argv) { console::set_display(console::reset); display = true; - // Add tokens to embd only if the input buffer is non-empty - // Entering a empty line lets the user pass control back - if (buffer.length() > 1) { + if (buffer.empty()) { // Ctrl+D on empty line exits + LOG("EOF by user\n"); + break; + } + + if (buffer.back() == '\n') { + // Implement #587: + // If the user wants the text to end in a newline, + // this should be accomplished by explicitly adding a newline by using \ followed by return, + // then returning control by pressing return again. + buffer.pop_back(); + } + + if (buffer.empty()) { // Enter key on empty line lets the user pass control back + LOG_DBG("empty line, passing control back\n"); + } else { // Add tokens to embd only if the input buffer is non-empty // append input suffix if any if (!params.input_suffix.empty() && !params.conversation_mode) { LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str()); @@ -915,8 +928,6 @@ int main(int argc, char ** argv) { n_remain -= line_inp.size(); LOG_DBG("n_remain: %d\n", n_remain); - } else { - LOG_DBG("empty line, passing control back\n"); } input_echo = false; // do not echo this again
diff --git a/examples/main/main.cpp b/examples/main/main.cpp index fd7410a646c69..c59b941bf5e47 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -865,9 +865,22 @@ int main(int argc, char ** argv) { console::set_display(console::reset); display = true; - // Add tokens to embd only if the input buffer is non-empty - // Entering a empty line lets the user pass control back - if (buffer.length() > 1) { + LOG("EOF by user\n"); + break; + if (buffer.back() == '\n') { + // Implement #587: + // If the user wants the text to end in a newline, + // this should be accomplished by explicitly adding a newline by using \ followed by return, + // then returning control by pressing return again. + buffer.pop_back(); + if (buffer.empty()) { // Enter key on empty line lets the user pass control back + } else { // Add tokens to embd only if the input buffer is non-empty // append input suffix if any if (!params.input_suffix.empty() && !params.conversation_mode) { LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str()); @@ -915,8 +928,6 @@ int main(int argc, char ** argv) { n_remain -= line_inp.size(); LOG_DBG("n_remain: %d\n", n_remain); - } else { - LOG_DBG("empty line, passing control back\n"); } input_echo = false; // do not echo this again
[ "+ if (buffer.empty()) { // Ctrl+D on empty line exits", "+ LOG_DBG(\"empty line, passing control back\\n\");" ]
[ 11, 25 ]
{ "additions": 16, "author": "danielzgtg", "deletions": 5, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12951", "issue_id": 12951, "merged_at": "2025-04-18T20:02:55Z", "omission_probability": 0.1, "pr_number": 12951, "repo": "ggml-org/llama.cpp", "title": "main : Fix Ctrl+D/newline handling", "total_changes": 21 }
809
diff --git a/examples/llava/gemma3-cli.cpp b/examples/llava/gemma3-cli.cpp index 91a07e2a8f40d..de206c85ae80c 100644 --- a/examples/llava/gemma3-cli.cpp +++ b/examples/llava/gemma3-cli.cpp @@ -184,18 +184,19 @@ static int eval_message(gemma3_context & ctx, common_chat_msg & msg, std::vector text.text = formatted_chat.prompt; text.add_special = add_bos; text.parse_special = true; - mtmd_input_chunks_ptr chunks(mtmd_tokenize(ctx.ctx_vision.get(), text, bitmaps)); - if (chunks == nullptr) { - LOG_ERR("Unable to tokenize prompt\n"); + mtmd_input_chunks chunks; + int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps); + if (res != 0) { + LOG_ERR("Unable to tokenize prompt, res = %d\n", res); return 1; } - if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks.get(), ctx.n_past, 0, ctx.n_batch)) { + if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks, ctx.n_past, 0, ctx.n_batch)) { LOG_ERR("Unable to eval prompt\n"); return 1; } - ctx.n_past += mtmd_helper_get_n_tokens(chunks.get()); + ctx.n_past += mtmd_helper_get_n_tokens(chunks); return 0; } diff --git a/examples/llava/mtmd.cpp b/examples/llava/mtmd.cpp index 114c274bc1250..3fd5bebc6a7d5 100644 --- a/examples/llava/mtmd.cpp +++ b/examples/llava/mtmd.cpp @@ -16,6 +16,7 @@ struct mtmd_context { struct clip_ctx * ctx_clip; const struct llama_model * text_model; std::vector<float> image_embd_v; // image embedding vector + bool print_timings; int n_threads; std::string image_marker; @@ -24,7 +25,11 @@ struct mtmd_context { mtmd_context(const char * mmproj_fname, const llama_model * text_model, - const mtmd_context_params & ctx_params) : print_timings(ctx_params.print_timings), n_threads(ctx_params.n_threads), image_marker(ctx_params.image_marker) { + const mtmd_context_params & ctx_params) : + print_timings(ctx_params.print_timings), + n_threads (ctx_params.n_threads), + image_marker (ctx_params.image_marker) + { clip_context_params ctx_clip_params; ctx_clip_params.use_gpu = 
ctx_params.use_gpu; ctx_clip_params.verbosity = ctx_params.verbosity; @@ -49,6 +54,7 @@ struct mtmd_image_tokens { uint32_t ny; // number of tokens in y direction uint32_t n_tokens() const { return nx * ny; } clip_image_f32_batch batch_f32; // preprocessed image patches + std::string id; // optional user-defined ID, useful for KV cache tracking }; mtmd_context * mtmd_init_from_file(const char * mmproj_fname, @@ -88,10 +94,10 @@ static std::vector<llama_token> mtmd_tokenize_text_internal( return result; } -mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, - const mtmd_input_text & text, - const std::vector<mtmd_bitmap> & bitmaps) { - mtmd_input_chunks * output = new mtmd_input_chunks; +int32_t mtmd_tokenize(mtmd_context * ctx, + std::vector<mtmd_input_chunk> & output, + const mtmd_input_text & text, + const std::vector<mtmd_bitmap> & bitmaps) { auto vocab = llama_model_get_vocab(ctx->text_model); std::string prompt_modified(text.text); @@ -105,9 +111,9 @@ mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, string_replace_all(prompt_modified, ctx->image_marker, marker_modified); } - std::vector<std::string> parts = string_split_str(text.text, ctx->image_marker); - output->clear(); - output->reserve(parts.size()); + std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker); + output.clear(); + output.reserve(parts.size()); size_t i_img = 0; @@ -123,14 +129,14 @@ mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, std::move(tokens), {}, }; - output->emplace_back(std::move(chunk)); + output.emplace_back(std::move(chunk)); if (&parts.back() != &part) { // add image token to middle of 2 parts if (i_img >= bitmaps.size()) { LOG_ERR("%s: error: not enough images for %d parts\n", __func__, (int)parts.size()); - return nullptr; + return 1; } // shim layer @@ -145,34 +151,48 @@ mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, bool ok = clip_image_preprocess(ctx->ctx_clip, img_u8.get(), &batch_f32); if (!ok) { LOG_ERR("Unable to 
preprocess image\n"); - return nullptr; + return 2; } - mtmd_image_tokens * image_tokens = new mtmd_image_tokens; + mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens); image_tokens->nx = clip_n_patches(ctx->ctx_clip); // TODO @ngxson : use clip_n_patches_by_image image_tokens->ny = 1; // TODO image_tokens->batch_f32 = std::move(batch_f32); + image_tokens->id = bitmaps[i_img].id; // optional mtmd_input_chunk chunk{ MTMD_INPUT_CHUNK_TYPE_IMAGE, {}, - image_tokens, + std::move(image_tokens), }; - output->emplace_back(std::move(chunk)); + output.emplace_back(std::move(chunk)); i_img++; } } - return output; + return 0; } -void mtmd_input_chunks_free(mtmd_input_chunks * chunks) { - for (auto & chunk : *chunks) { - if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE && chunk.tokens_image) { - delete chunk.tokens_image; - } +void mtmd_image_tokens_free(mtmd_image_tokens * image_tokens) { + if (image_tokens) { + delete image_tokens; } - delete chunks; +} + +size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens) { + return image_tokens->n_tokens(); +} + +size_t mtmd_image_tokens_get_nx(const mtmd_image_tokens * image_tokens) { + return image_tokens->nx; +} + +size_t mtmd_image_tokens_get_ny(const mtmd_image_tokens * image_tokens) { + return image_tokens->ny; +} + +std::string mtmd_image_tokens_get_id(const mtmd_image_tokens * image_tokens) { + return image_tokens->id; } int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) { @@ -190,9 +210,9 @@ float * mtmd_get_output_embd(mtmd_context * ctx) { return ctx->image_embd_v.data(); } -size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks) { +size_t mtmd_helper_get_n_tokens(mtmd_input_chunks & chunks) { size_t n_tokens = 0; - for (auto & chunk : *chunks) { + for (auto & chunk : chunks) { if (chunk.type == MTMD_INPUT_CHUNK_TYPE_TEXT) { n_tokens += chunk.tokens_text.size(); } else if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE) { @@ -241,7 +261,7 @@ struct decode_embd_batch { 
int32_t mtmd_helper_eval(mtmd_context * ctx, llama_context * lctx, - mtmd_input_chunks * chunks, + mtmd_input_chunks & chunks, llama_pos pos0, llama_seq_id seq_id, int32_t n_batch) { @@ -249,8 +269,8 @@ int32_t mtmd_helper_eval(mtmd_context * ctx, llama_pos n_past = pos0; llama_batch text_batch = llama_batch_init(n_batch, 0, 1); - for (auto & chunk : *chunks) { - bool is_last = &chunk == &chunks->back(); + for (auto & chunk : chunks) { + bool is_last = &chunk == &chunks.back(); if (chunk.type == MTMD_INPUT_CHUNK_TYPE_TEXT) { // TODO @ngxson : may need to split into smaller batches text_batch.n_tokens = chunk.tokens_text.size(); @@ -279,7 +299,7 @@ int32_t mtmd_helper_eval(mtmd_context * ctx, if (ctx->print_timings) { LOG_INF("encoding image...\n"); } - ret = mtmd_encode(ctx, chunk.tokens_image); + ret = mtmd_encode(ctx, chunk.tokens_image.get()); if (ret != 0) { LOG_ERR("failed to encode image\n"); llama_batch_free(text_batch); @@ -289,7 +309,7 @@ int32_t mtmd_helper_eval(mtmd_context * ctx, LOG_INF("image encoded in %" PRId64 " ms\n", ggml_time_ms() - t0); } - int32_t n_tokens = chunk.tokens_image->n_tokens(); + int32_t n_tokens = mtmd_image_tokens_get_n_tokens(chunk.tokens_image.get()); float * embd = mtmd_get_output_embd(ctx); decode_embd_batch batch_img(embd, n_tokens, n_past, 0); int64_t t1 = ggml_time_ms(); @@ -339,3 +359,15 @@ int32_t mtmd_helper_bitmap_init_from_file(const char * fname, mtmd_bitmap & outp std::memcpy(output.data.data(), data, output.nx * output.ny * 3); return 0; } + +bool mtmd_decode_use_non_causal(mtmd_context * ctx) { + projector_type proj_type = clip_get_projector_type(ctx->ctx_clip); + if (proj_type == PROJECTOR_TYPE_GEMMA3) { + return true; + } + return false; +} + +void mtmd_image_tokens_deleter::operator()(mtmd_image_tokens * val) { + mtmd_image_tokens_free(val); +} diff --git a/examples/llava/mtmd.h b/examples/llava/mtmd.h index 598f6947bb092..78be192dd6eb6 100644 --- a/examples/llava/mtmd.h +++ b/examples/llava/mtmd.h @@ -39,12 
+39,18 @@ struct mtmd_bitmap { uint32_t nx; uint32_t ny; std::vector<unsigned char> data; + std::string id; // optional user-defined id, for ex: can be set to image hash, useful for KV cache tracking }; +struct mtmd_image_tokens_deleter { + void operator()(mtmd_image_tokens * val); // forward declaration +}; +using mtmd_image_tokens_ptr = std::unique_ptr<mtmd_image_tokens, mtmd_image_tokens_deleter>; + struct mtmd_input_chunk { mtmd_input_chunk_type type; std::vector<llama_token> tokens_text; - mtmd_image_tokens * tokens_image = nullptr; + mtmd_image_tokens_ptr tokens_image; }; using mtmd_input_chunks = std::vector<mtmd_input_chunk>; @@ -82,12 +88,21 @@ MTMD_API void mtmd_free(mtmd_context * ctx); // 3. "<end_of_image>\ndescribe it in detail." // number of bitmaps must be equal to the number of image markers in the prompt // this function is thread-safe (shared ctx) -MTMD_API mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, +// return values: +// 0 on success +// 1 on number of images not matching the number of markers +// 2 on image preprocessing error +MTMD_API int32_t mtmd_tokenize(mtmd_context * ctx, + std::vector<mtmd_input_chunk> & output, const mtmd_input_text & text, const std::vector<mtmd_bitmap> & bitmaps); -// free image chunk data -MTMD_API void mtmd_input_chunks_free(mtmd_input_chunks * chunks); +// access mtmd_image_tokens +MTMD_API size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens); +MTMD_API size_t mtmd_image_tokens_get_nx(const mtmd_image_tokens * image_tokens); +MTMD_API size_t mtmd_image_tokens_get_ny(const mtmd_image_tokens * image_tokens); +MTMD_API std::string mtmd_image_tokens_get_id(const mtmd_image_tokens * image_tokens); +MTMD_API void mtmd_image_tokens_free(mtmd_image_tokens * image_tokens); // returns 0 on success MTMD_API int32_t mtmd_encode(mtmd_context * ctx, @@ -96,12 +111,17 @@ MTMD_API int32_t mtmd_encode(mtmd_context * ctx, // get output embeddings from the last encode pass MTMD_API float * 
mtmd_get_output_embd(mtmd_context * ctx); +// whether we need to set non-causal mask before llama_decode +MTMD_API bool mtmd_decode_use_non_causal(mtmd_context * ctx); + + + // // helper functions (can be implemented based on other functions) // // helper to count the total number of tokens from a list of chunks, useful to keep track of n_past -MTMD_API size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks); +MTMD_API size_t mtmd_helper_get_n_tokens(mtmd_input_chunks & chunks); // helper function that automatically: // 1. run llama_decode() on text chunks @@ -110,7 +130,7 @@ MTMD_API size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks); // otherwise, returns 0 on success MTMD_API int32_t mtmd_helper_eval(mtmd_context * ctx, llama_context * lctx, - mtmd_input_chunks * chunks, + mtmd_input_chunks & chunks, llama_pos pos0, llama_seq_id seq_id, int32_t n_batch); @@ -132,11 +152,6 @@ struct mtmd_context_deleter { }; using mtmd_context_ptr = std::unique_ptr<mtmd_context, mtmd_context_deleter>; -struct mtmd_input_chunks_deleter { - void operator()(mtmd_input_chunks * val) { mtmd_input_chunks_free(val); } -}; -using mtmd_input_chunks_ptr = std::unique_ptr<mtmd_input_chunks, mtmd_input_chunks_deleter>; - #else static_assert(false && "C header is not yet supported by this library");
diff --git a/examples/llava/gemma3-cli.cpp b/examples/llava/gemma3-cli.cpp index 91a07e2a8f40d..de206c85ae80c 100644 --- a/examples/llava/gemma3-cli.cpp +++ b/examples/llava/gemma3-cli.cpp @@ -184,18 +184,19 @@ static int eval_message(gemma3_context & ctx, common_chat_msg & msg, std::vector text.text = formatted_chat.prompt; text.add_special = add_bos; text.parse_special = true; - mtmd_input_chunks_ptr chunks(mtmd_tokenize(ctx.ctx_vision.get(), text, bitmaps)); - if (chunks == nullptr) { - LOG_ERR("Unable to tokenize prompt\n"); + mtmd_input_chunks chunks; + if (res != 0) { + LOG_ERR("Unable to tokenize prompt, res = %d\n", res); - if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks.get(), ctx.n_past, 0, ctx.n_batch)) { + if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks, ctx.n_past, 0, ctx.n_batch)) { LOG_ERR("Unable to eval prompt\n"); - ctx.n_past += mtmd_helper_get_n_tokens(chunks.get()); + ctx.n_past += mtmd_helper_get_n_tokens(chunks); diff --git a/examples/llava/mtmd.cpp b/examples/llava/mtmd.cpp index 114c274bc1250..3fd5bebc6a7d5 100644 --- a/examples/llava/mtmd.cpp +++ b/examples/llava/mtmd.cpp @@ -16,6 +16,7 @@ struct mtmd_context { struct clip_ctx * ctx_clip; const struct llama_model * text_model; std::vector<float> image_embd_v; // image embedding vector bool print_timings; int n_threads; std::string image_marker; @@ -24,7 +25,11 @@ struct mtmd_context { mtmd_context(const char * mmproj_fname, const llama_model * text_model, - const mtmd_context_params & ctx_params) : print_timings(ctx_params.print_timings), n_threads(ctx_params.n_threads), image_marker(ctx_params.image_marker) { + const mtmd_context_params & ctx_params) : + print_timings(ctx_params.print_timings), + n_threads (ctx_params.n_threads), + image_marker (ctx_params.image_marker) + { clip_context_params ctx_clip_params; ctx_clip_params.use_gpu = ctx_params.use_gpu; ctx_clip_params.verbosity = ctx_params.verbosity; @@ -49,6 +54,7 @@ struct mtmd_image_tokens { uint32_t ny; // 
number of tokens in y direction uint32_t n_tokens() const { return nx * ny; } clip_image_f32_batch batch_f32; // preprocessed image patches + std::string id; // optional user-defined ID, useful for KV cache tracking mtmd_context * mtmd_init_from_file(const char * mmproj_fname, @@ -88,10 +94,10 @@ static std::vector<llama_token> mtmd_tokenize_text_internal( return result; -mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, - const mtmd_input_text & text, - const std::vector<mtmd_bitmap> & bitmaps) { - mtmd_input_chunks * output = new mtmd_input_chunks; +int32_t mtmd_tokenize(mtmd_context * ctx, + std::vector<mtmd_input_chunk> & output, + const mtmd_input_text & text, + const std::vector<mtmd_bitmap> & bitmaps) { auto vocab = llama_model_get_vocab(ctx->text_model); std::string prompt_modified(text.text); @@ -105,9 +111,9 @@ mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, string_replace_all(prompt_modified, ctx->image_marker, marker_modified); - std::vector<std::string> parts = string_split_str(text.text, ctx->image_marker); - output->clear(); - output->reserve(parts.size()); + output.reserve(parts.size()); size_t i_img = 0; @@ -123,14 +129,14 @@ mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, std::move(tokens), {}, }; + output.emplace_back(std::move(chunk)); if (&parts.back() != &part) { // add image token to middle of 2 parts if (i_img >= bitmaps.size()) { LOG_ERR("%s: error: not enough images for %d parts\n", __func__, (int)parts.size()); + return 1; // shim layer @@ -145,34 +151,48 @@ mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, bool ok = clip_image_preprocess(ctx->ctx_clip, img_u8.get(), &batch_f32); if (!ok) { LOG_ERR("Unable to preprocess image\n"); + return 2; + mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens); image_tokens->nx = clip_n_patches(ctx->ctx_clip); // TODO @ngxson : use clip_n_patches_by_image image_tokens->ny = 1; // TODO image_tokens->batch_f32 = std::move(batch_f32); + image_tokens->id = bitmaps[i_img].id; // 
optional mtmd_input_chunk chunk{ MTMD_INPUT_CHUNK_TYPE_IMAGE, {}, - image_tokens, + std::move(image_tokens), }; - output->emplace_back(std::move(chunk)); + output.emplace_back(std::move(chunk)); i_img++; } - return output; + return 0; -void mtmd_input_chunks_free(mtmd_input_chunks * chunks) { - delete chunk.tokens_image; - } +void mtmd_image_tokens_free(mtmd_image_tokens * image_tokens) { + if (image_tokens) { + delete image_tokens; - delete chunks; +size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens) { + return image_tokens->n_tokens(); +size_t mtmd_image_tokens_get_nx(const mtmd_image_tokens * image_tokens) { + return image_tokens->nx; +size_t mtmd_image_tokens_get_ny(const mtmd_image_tokens * image_tokens) { + return image_tokens->ny; +std::string mtmd_image_tokens_get_id(const mtmd_image_tokens * image_tokens) { int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) { @@ -190,9 +210,9 @@ float * mtmd_get_output_embd(mtmd_context * ctx) { return ctx->image_embd_v.data(); -size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks) { +size_t mtmd_helper_get_n_tokens(mtmd_input_chunks & chunks) { size_t n_tokens = 0; n_tokens += chunk.tokens_text.size(); } else if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE) { @@ -241,7 +261,7 @@ struct decode_embd_batch { int32_t mtmd_helper_eval(mtmd_context * ctx, llama_context * lctx, - mtmd_input_chunks * chunks, + mtmd_input_chunks & chunks, llama_pos pos0, llama_seq_id seq_id, int32_t n_batch) { @@ -249,8 +269,8 @@ int32_t mtmd_helper_eval(mtmd_context * ctx, llama_pos n_past = pos0; llama_batch text_batch = llama_batch_init(n_batch, 0, 1); - bool is_last = &chunk == &chunks->back(); + bool is_last = &chunk == &chunks.back(); // TODO @ngxson : may need to split into smaller batches text_batch.n_tokens = chunk.tokens_text.size(); @@ -279,7 +299,7 @@ int32_t mtmd_helper_eval(mtmd_context * ctx, if (ctx->print_timings) { LOG_INF("encoding image...\n"); - ret = mtmd_encode(ctx, 
chunk.tokens_image); + ret = mtmd_encode(ctx, chunk.tokens_image.get()); if (ret != 0) { LOG_ERR("failed to encode image\n"); llama_batch_free(text_batch); @@ -289,7 +309,7 @@ int32_t mtmd_helper_eval(mtmd_context * ctx, LOG_INF("image encoded in %" PRId64 " ms\n", ggml_time_ms() - t0); - int32_t n_tokens = chunk.tokens_image->n_tokens(); + int32_t n_tokens = mtmd_image_tokens_get_n_tokens(chunk.tokens_image.get()); float * embd = mtmd_get_output_embd(ctx); decode_embd_batch batch_img(embd, n_tokens, n_past, 0); int64_t t1 = ggml_time_ms(); @@ -339,3 +359,15 @@ int32_t mtmd_helper_bitmap_init_from_file(const char * fname, mtmd_bitmap & outp std::memcpy(output.data.data(), data, output.nx * output.ny * 3); +bool mtmd_decode_use_non_causal(mtmd_context * ctx) { + projector_type proj_type = clip_get_projector_type(ctx->ctx_clip); + if (proj_type == PROJECTOR_TYPE_GEMMA3) { + return true; + } + return false; +void mtmd_image_tokens_deleter::operator()(mtmd_image_tokens * val) { + mtmd_image_tokens_free(val); diff --git a/examples/llava/mtmd.h b/examples/llava/mtmd.h index 598f6947bb092..78be192dd6eb6 100644 --- a/examples/llava/mtmd.h +++ b/examples/llava/mtmd.h @@ -39,12 +39,18 @@ struct mtmd_bitmap { uint32_t nx; uint32_t ny; std::vector<unsigned char> data; + std::string id; // optional user-defined id, for ex: can be set to image hash, useful for KV cache tracking +struct mtmd_image_tokens_deleter { + void operator()(mtmd_image_tokens * val); // forward declaration +}; +using mtmd_image_tokens_ptr = std::unique_ptr<mtmd_image_tokens, mtmd_image_tokens_deleter>; struct mtmd_input_chunk { mtmd_input_chunk_type type; std::vector<llama_token> tokens_text; + mtmd_image_tokens_ptr tokens_image; using mtmd_input_chunks = std::vector<mtmd_input_chunk>; @@ -82,12 +88,21 @@ MTMD_API void mtmd_free(mtmd_context * ctx); // 3. "<end_of_image>\ndescribe it in detail." 
// number of bitmaps must be equal to the number of image markers in the prompt // this function is thread-safe (shared ctx) -MTMD_API mtmd_input_chunks * mtmd_tokenize(mtmd_context * ctx, +// return values: +// 0 on success +// 1 on number of images not matching the number of markers +// 2 on image preprocessing error +MTMD_API int32_t mtmd_tokenize(mtmd_context * ctx, const mtmd_input_text & text, const std::vector<mtmd_bitmap> & bitmaps); -// free image chunk data -MTMD_API void mtmd_input_chunks_free(mtmd_input_chunks * chunks); +// access mtmd_image_tokens +MTMD_API size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens); +MTMD_API size_t mtmd_image_tokens_get_nx(const mtmd_image_tokens * image_tokens); +MTMD_API size_t mtmd_image_tokens_get_ny(const mtmd_image_tokens * image_tokens); +MTMD_API std::string mtmd_image_tokens_get_id(const mtmd_image_tokens * image_tokens); // returns 0 on success MTMD_API int32_t mtmd_encode(mtmd_context * ctx, @@ -96,12 +111,17 @@ MTMD_API int32_t mtmd_encode(mtmd_context * ctx, // get output embeddings from the last encode pass MTMD_API float * mtmd_get_output_embd(mtmd_context * ctx); +// whether we need to set non-causal mask before llama_decode // helper functions (can be implemented based on other functions) // helper to count the total number of tokens from a list of chunks, useful to keep track of n_past -MTMD_API size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks); +MTMD_API size_t mtmd_helper_get_n_tokens(mtmd_input_chunks & chunks); // helper function that automatically: // 1. 
run llama_decode() on text chunks @@ -110,7 +130,7 @@ MTMD_API size_t mtmd_helper_get_n_tokens(mtmd_input_chunks * chunks); // otherwise, returns 0 on success MTMD_API int32_t mtmd_helper_eval(mtmd_context * ctx, llama_context * lctx, - mtmd_input_chunks * chunks, + mtmd_input_chunks & chunks, llama_pos pos0, llama_seq_id seq_id, int32_t n_batch); @@ -132,11 +152,6 @@ struct mtmd_context_deleter { using mtmd_context_ptr = std::unique_ptr<mtmd_context, mtmd_context_deleter>; -struct mtmd_input_chunks_deleter { - void operator()(mtmd_input_chunks * val) { mtmd_input_chunks_free(val); } -}; -using mtmd_input_chunks_ptr = std::unique_ptr<mtmd_input_chunks, mtmd_input_chunks_deleter>; #else static_assert(false && "C header is not yet supported by this library");
[ "+ int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps);", "+ std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker);", "+ output.clear();", "- output->emplace_back(std::move(chunk));", "- mtmd_image_tokens * image_tokens = new mtmd_image_tokens;", "- if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE && chunk.tokens_image) {", "+ return image_tokens->id;", "- mtmd_image_tokens * tokens_image = nullptr;", "+ std::vector<mtmd_input_chunk> & output,", "+MTMD_API void mtmd_image_tokens_free(mtmd_image_tokens * image_tokens);", "+MTMD_API bool mtmd_decode_use_non_causal(mtmd_context * ctx);", "-" ]
[ 12, 84, 85, 94, 115, 140, 163, 252, 267, 278, 287, 318 ]
{ "additions": 92, "author": "ngxson", "deletions": 44, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12906", "issue_id": 12906, "merged_at": "2025-04-18T08:04:51Z", "omission_probability": 0.1, "pr_number": 12906, "repo": "ggml-org/llama.cpp", "title": "mtmd : add methods to access `mtmd_image_tokens`", "total_changes": 136 }
810
diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index f8f2cb90ea10d..9db5542570de8 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -297,7 +297,10 @@ int main(int argc, char * argv[]) { } cache_dir = cache_dir_str.c_str(); } - printf("Starting RPC server\n"); + printf("Starting RPC server v%d.%d.%d\n", + RPC_PROTO_MAJOR_VERSION, + RPC_PROTO_MINOR_VERSION, + RPC_PROTO_PATCH_VERSION); printf(" endpoint : %s\n", endpoint.c_str()); printf(" local cache : %s\n", cache_dir ? cache_dir : "n/a"); printf(" backend memory : %zu MB\n", free_mem / (1024 * 1024)); diff --git a/ggml/include/ggml-rpc.h b/ggml/include/ggml-rpc.h index 4e0d210f8ec97..c8b6097f7e573 100644 --- a/ggml/include/ggml-rpc.h +++ b/ggml/include/ggml-rpc.h @@ -7,6 +7,9 @@ extern "C" { #endif +#define RPC_PROTO_MAJOR_VERSION 1 +#define RPC_PROTO_MINOR_VERSION 0 +#define RPC_PROTO_PATCH_VERSION 0 #define GGML_RPC_MAX_SERVERS 16 // backend API diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp index 3189ae85d55f9..a0667b7d702b2 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp @@ -92,12 +92,19 @@ enum rpc_cmd { RPC_CMD_GET_DEVICE_MEMORY, RPC_CMD_INIT_TENSOR, RPC_CMD_GET_ALLOC_SIZE, + RPC_CMD_HELLO, RPC_CMD_COUNT, }; // Try RPC_CMD_SET_TENSOR_HASH first when data size is larger than this threshold const size_t HASH_THRESHOLD = 10 * 1024 * 1024; +struct rpc_msg_hello_rsp { + uint8_t major; + uint8_t minor; + uint8_t patch; +}; + struct rpc_msg_get_alloc_size_req { rpc_tensor tensor; }; @@ -400,6 +407,20 @@ static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cm // RPC client-side implementation +static bool check_server_version(const std::shared_ptr<socket_t> & sock) { + rpc_msg_hello_rsp response; + bool status = send_rpc_cmd(sock, RPC_CMD_HELLO, nullptr, 0, &response, sizeof(response)); + GGML_ASSERT(status); + if (response.major != RPC_PROTO_MAJOR_VERSION || response.minor > 
RPC_PROTO_MINOR_VERSION) { + fprintf(stderr, "RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); + return false; + } + if (response.minor != RPC_PROTO_MINOR_VERSION || response.patch != RPC_PROTO_PATCH_VERSION) { + fprintf(stderr, "WARNING: RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); + } + return true; +} + static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) { static std::mutex mutex; std::lock_guard<std::mutex> lock(mutex); @@ -433,6 +454,9 @@ static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) { if (sock == nullptr) { return nullptr; } + if (!check_server_version(sock)) { + return nullptr; + } GGML_PRINT_DEBUG("[%s] connected to %s, sockfd=%d\n", __func__, endpoint.c_str(), sock->fd); sockets[endpoint] = sock; return sock; @@ -818,6 +842,7 @@ class rpc_server { } ~rpc_server(); + void hello(rpc_msg_hello_rsp & response); void alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response); void get_alignment(rpc_msg_get_alignment_rsp & response); void get_max_size(rpc_msg_get_max_size_rsp & response); @@ -846,6 +871,13 @@ class rpc_server { std::unordered_set<ggml_backend_buffer_t> buffers; }; +void rpc_server::hello(rpc_msg_hello_rsp & response) { + response.major = RPC_PROTO_MAJOR_VERSION; + response.minor = RPC_PROTO_MINOR_VERSION; + response.patch = RPC_PROTO_PATCH_VERSION; + GGML_PRINT_DEBUG("[%s] version: %d.%d.%d\n", __func__, response.major, response.minor, response.patch); +} + bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response) { ggml_backend_buffer_type_t buft; struct ggml_init_params params { @@ -1271,8 +1303,24 @@ rpc_server::~rpc_server() { static void rpc_serve_client(ggml_backend_t backend, const char * cache_dir, sockfd_t sockfd, size_t free_mem, size_t total_mem) { rpc_server server(backend, cache_dir); + uint8_t cmd; + if 
(!recv_data(sockfd, &cmd, 1)) { + return; + } + // the first command sent by the client must be HELLO + if (cmd != RPC_CMD_HELLO) { + fprintf(stderr, "Expected HELLO command, update client\n"); + return; + } + if (!recv_msg(sockfd, nullptr, 0)) { + return; + } + rpc_msg_hello_rsp response; + server.hello(response); + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } while (true) { - uint8_t cmd; if (!recv_data(sockfd, &cmd, 1)) { break; } @@ -1282,6 +1330,10 @@ static void rpc_serve_client(ggml_backend_t backend, const char * cache_dir, break; } switch (cmd) { + case RPC_CMD_HELLO: { + // HELLO command is handled above + return; + } case RPC_CMD_ALLOC_BUFFER: { rpc_msg_alloc_buffer_req request; if (!recv_msg(sockfd, &request, sizeof(request))) {
diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index f8f2cb90ea10d..9db5542570de8 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -297,7 +297,10 @@ int main(int argc, char * argv[]) { cache_dir = cache_dir_str.c_str(); - printf("Starting RPC server\n"); + printf("Starting RPC server v%d.%d.%d\n", + RPC_PROTO_MAJOR_VERSION, + RPC_PROTO_MINOR_VERSION, printf(" endpoint : %s\n", endpoint.c_str()); printf(" local cache : %s\n", cache_dir ? cache_dir : "n/a"); printf(" backend memory : %zu MB\n", free_mem / (1024 * 1024)); diff --git a/ggml/include/ggml-rpc.h b/ggml/include/ggml-rpc.h index 4e0d210f8ec97..c8b6097f7e573 100644 --- a/ggml/include/ggml-rpc.h +++ b/ggml/include/ggml-rpc.h @@ -7,6 +7,9 @@ extern "C" { #endif +#define RPC_PROTO_MAJOR_VERSION 1 +#define RPC_PROTO_PATCH_VERSION 0 #define GGML_RPC_MAX_SERVERS 16 // backend API diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp index 3189ae85d55f9..a0667b7d702b2 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp @@ -92,12 +92,19 @@ enum rpc_cmd { RPC_CMD_GET_DEVICE_MEMORY, RPC_CMD_INIT_TENSOR, RPC_CMD_GET_ALLOC_SIZE, + RPC_CMD_HELLO, RPC_CMD_COUNT, // Try RPC_CMD_SET_TENSOR_HASH first when data size is larger than this threshold const size_t HASH_THRESHOLD = 10 * 1024 * 1024; +struct rpc_msg_hello_rsp { + uint8_t major; + uint8_t minor; + uint8_t patch; struct rpc_msg_get_alloc_size_req { rpc_tensor tensor; @@ -400,6 +407,20 @@ static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cm // RPC client-side implementation +static bool check_server_version(const std::shared_ptr<socket_t> & sock) { + bool status = send_rpc_cmd(sock, RPC_CMD_HELLO, nullptr, 0, &response, sizeof(response)); + GGML_ASSERT(status); + if (response.major != RPC_PROTO_MAJOR_VERSION || response.minor > RPC_PROTO_MINOR_VERSION) { + return false; + if (response.minor != RPC_PROTO_MINOR_VERSION || response.patch != 
RPC_PROTO_PATCH_VERSION) { + fprintf(stderr, "WARNING: RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); + return true; static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) { static std::mutex mutex; std::lock_guard<std::mutex> lock(mutex); @@ -433,6 +454,9 @@ static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) { if (sock == nullptr) { return nullptr; + if (!check_server_version(sock)) { + return nullptr; GGML_PRINT_DEBUG("[%s] connected to %s, sockfd=%d\n", __func__, endpoint.c_str(), sock->fd); sockets[endpoint] = sock; return sock; @@ -818,6 +842,7 @@ class rpc_server { ~rpc_server(); void alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response); void get_alignment(rpc_msg_get_alignment_rsp & response); void get_max_size(rpc_msg_get_max_size_rsp & response); @@ -846,6 +871,13 @@ class rpc_server { std::unordered_set<ggml_backend_buffer_t> buffers; +void rpc_server::hello(rpc_msg_hello_rsp & response) { + response.major = RPC_PROTO_MAJOR_VERSION; + response.minor = RPC_PROTO_MINOR_VERSION; + response.patch = RPC_PROTO_PATCH_VERSION; + GGML_PRINT_DEBUG("[%s] version: %d.%d.%d\n", __func__, response.major, response.minor, response.patch); bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response) { ggml_backend_buffer_type_t buft; struct ggml_init_params params { @@ -1271,8 +1303,24 @@ rpc_server::~rpc_server() { static void rpc_serve_client(ggml_backend_t backend, const char * cache_dir, sockfd_t sockfd, size_t free_mem, size_t total_mem) { rpc_server server(backend, cache_dir); + uint8_t cmd; + if (!recv_data(sockfd, &cmd, 1)) { + // the first command sent by the client must be HELLO + if (cmd != RPC_CMD_HELLO) { + fprintf(stderr, "Expected HELLO command, update client\n"); + if (!recv_msg(sockfd, nullptr, 0)) { + if (!send_msg(sockfd, &response, sizeof(response))) { while (true) { - uint8_t 
cmd; if (!recv_data(sockfd, &cmd, 1)) { @@ -1282,6 +1330,10 @@ static void rpc_serve_client(ggml_backend_t backend, const char * cache_dir, switch (cmd) { + case RPC_CMD_HELLO: { + // HELLO command is handled above + } case RPC_CMD_ALLOC_BUFFER: { rpc_msg_alloc_buffer_req request; if (!recv_msg(sockfd, &request, sizeof(request))) {
[ "+ RPC_PROTO_PATCH_VERSION);", "+#define RPC_PROTO_MINOR_VERSION 0", "+};", "+ fprintf(stderr, \"RPC server version mismatch: %d.%d.%d\\n\", response.major, response.minor, response.patch);", "+ void hello(rpc_msg_hello_rsp & response);", "+ server.hello(response);", "+ return;" ]
[ 12, 25, 49, 63, 89, 124, 139 ]
{ "additions": 60, "author": "rgerganov", "deletions": 2, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12955", "issue_id": 12955, "merged_at": "2025-04-18T07:13:43Z", "omission_probability": 0.1, "pr_number": 12955, "repo": "ggml-org/llama.cpp", "title": "rpc : add RPC_CMD_HELLO", "total_changes": 62 }
811
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index bab85809ac068..a7febef723c2e 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -3237,6 +3237,10 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g if (op->src[0]->ne[0] == 192) { return false; } + if (op->src[0]->ne[0] == 576) { + // DeepSeek MLA + return false; + } if (op->src[0]->ne[3] != 1) { return false; } diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 16ca08383c7ed..85f3ae7bfdc31 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -354,6 +354,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H96, @@ -362,6 +363,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H96, @@ -370,6 +372,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H80, 
GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H96, @@ -378,6 +381,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H96, @@ -386,6 +390,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H96, @@ -394,6 +399,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H96, @@ -402,6 +408,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, @@ -437,6 +444,13 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte 
GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H256, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H256, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_SET_I32, GGML_METAL_KERNEL_TYPE_SET_F32, GGML_METAL_KERNEL_TYPE_CPY_F32_F32, @@ -1018,6 +1032,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H192, flash_attn_ext_f16_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK192_HV128, flash_attn_ext_f16_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, flash_attn_ext_f16_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK576_HV512, flash_attn_ext_f16_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H64, flash_attn_ext_bf16_h64, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H80, flash_attn_ext_bf16_h80, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H96, flash_attn_ext_bf16_h96, has_simdgroup_mm && use_bfloat); @@ -1026,6 +1041,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H192, flash_attn_ext_bf16_h192, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK192_HV128, flash_attn_ext_bf16_hk192_hv128, has_simdgroup_mm && use_bfloat); 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H256, flash_attn_ext_bf16_h256, has_simdgroup_mm && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK576_HV512, flash_attn_ext_bf16_hk576_hv512, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H64, flash_attn_ext_q4_0_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H80, flash_attn_ext_q4_0_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H96, flash_attn_ext_q4_0_h96, has_simdgroup_mm); @@ -1034,6 +1050,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H192, flash_attn_ext_q4_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK192_HV128, flash_attn_ext_q4_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H256, flash_attn_ext_q4_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK576_HV512, flash_attn_ext_q4_0_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H64, flash_attn_ext_q4_1_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H80, flash_attn_ext_q4_1_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H96, flash_attn_ext_q4_1_h96, has_simdgroup_mm); @@ -1042,6 +1059,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H192, flash_attn_ext_q4_1_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK192_HV128, flash_attn_ext_q4_1_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H256, flash_attn_ext_q4_1_h256, has_simdgroup_mm); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK576_HV512, flash_attn_ext_q4_1_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H64, flash_attn_ext_q5_0_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H80, flash_attn_ext_q5_0_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H96, flash_attn_ext_q5_0_h96, has_simdgroup_mm); @@ -1050,6 +1068,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H192, flash_attn_ext_q5_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK192_HV128, flash_attn_ext_q5_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H256, flash_attn_ext_q5_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK576_HV512, flash_attn_ext_q5_0_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H64, flash_attn_ext_q5_1_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H80, flash_attn_ext_q5_1_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H96, flash_attn_ext_q5_1_h96, has_simdgroup_mm); @@ -1058,6 +1077,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H192, flash_attn_ext_q5_1_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK192_HV128, flash_attn_ext_q5_1_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H256, flash_attn_ext_q5_1_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK576_HV512, flash_attn_ext_q5_1_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H64, 
flash_attn_ext_q8_0_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H80, flash_attn_ext_q8_0_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H96, flash_attn_ext_q8_0_h96, has_simdgroup_mm); @@ -1066,6 +1086,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, flash_attn_ext_q8_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, flash_attn_ext_q8_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, flash_attn_ext_q8_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512, flash_attn_ext_q8_0_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, flash_attn_ext_vec_f16_h96, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, flash_attn_ext_vec_bf16_h96, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, flash_attn_ext_vec_q4_0_h96, has_simdgroup_reduction); @@ -1101,6 +1122,13 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H256, flash_attn_ext_vec_q5_0_h256, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H256, flash_attn_ext_vec_q5_1_h256, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H256, flash_attn_ext_vec_q8_0_h256, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_HK576_HV512, flash_attn_ext_vec_f16_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_HK576_HV512, flash_attn_ext_vec_bf16_hk576_hv512, has_simdgroup_reduction && use_bfloat); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_HK576_HV512, flash_attn_ext_vec_q4_0_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_HK576_HV512, flash_attn_ext_vec_q4_1_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_HK576_HV512, flash_attn_ext_vec_q5_0_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_HK576_HV512, flash_attn_ext_vec_q5_1_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_HK576_HV512, flash_attn_ext_vec_q8_0_hk576_hv512, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_F32, set_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_I32, set_i32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true); @@ -1365,6 +1393,11 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex // TODO: not sure if it is worth adding kernels for this size return false; } + if (op->src[0]->ne[0] == 576) { + // DeepSeek sizes + // TODO: disabled for now, until optmized + return false; + } if (op->src[1]->type != op->src[2]->type) { return false; } @@ -3857,12 +3890,14 @@ static void ggml_metal_encode_node( // TODO: add vec kernels for (ne00%64 == 0) and maybe also for (ne00%32 == 0) // for now avoiding mainly to keep the number of templates/kernels a bit lower // these are now trivial to add after: https://github.com/ggml-org/llama.cpp/pull/12612 - if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 96 && ne00 != 192)) { + if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 96 && ne00 != 192 && ne00 != 576)) { switch (src1->type) { case GGML_TYPE_F16: { if (ne00 == 192 && ne20 == 128) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK192_HV128].pipeline; + } else if (ne00 == 576 && ne20 == 512) { + pipeline = 
ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK576_HV512].pipeline; } else { switch (ne00) { case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64 ].pipeline; break; @@ -3885,6 +3920,8 @@ static void ggml_metal_encode_node( { if (ne00 == 192 && ne20 == 128) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK192_HV128].pipeline; + } else if (ne00 == 576 && ne20 == 512) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK576_HV512].pipeline; } else { switch (ne00) { case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H64 ].pipeline; break; @@ -3907,6 +3944,8 @@ static void ggml_metal_encode_node( { if (ne00 == 192 && ne20 == 128) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK192_HV128].pipeline; + } else if (ne00 == 576 && ne20 == 512) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK576_HV512].pipeline; } else { switch (ne00) { case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H64 ].pipeline; break; @@ -3929,6 +3968,8 @@ static void ggml_metal_encode_node( { if (ne00 == 192 && ne20 == 128) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK192_HV128].pipeline; + } else if (ne00 == 576 && ne20 == 512) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK576_HV512].pipeline; } else { switch (ne00) { case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H64 ].pipeline; break; @@ -3951,6 +3992,8 @@ static void ggml_metal_encode_node( { if (ne00 == 192 && ne20 == 128) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK192_HV128].pipeline; + } else if (ne00 == 576 && ne20 == 512) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK576_HV512].pipeline; } else { switch (ne00) { case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H64 ].pipeline; break; @@ -3973,6 +4016,8 @@ 
static void ggml_metal_encode_node( { if (ne00 == 192 && ne20 == 128) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK192_HV128].pipeline; + } else if (ne00 == 576 && ne20 == 512) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK576_HV512].pipeline; } else { switch (ne00) { case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H64 ].pipeline; break; @@ -3995,6 +4040,8 @@ static void ggml_metal_encode_node( { if (ne00 == 192 && ne20 == 128) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128].pipeline; + } else if (ne00 == 576 && ne20 == 512) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512].pipeline; } else { switch (ne00) { case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H64 ].pipeline; break; @@ -4114,12 +4161,36 @@ static void ggml_metal_encode_node( } } } break; + case 576: + { + if (ne20 == 512) { + switch (src1->type) { + case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_HK576_HV512].pipeline; break; + case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_HK576_HV512].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_HK576_HV512].pipeline; break; + case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_HK576_HV512].pipeline; break; + case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_HK576_HV512].pipeline; break; + case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_HK576_HV512].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_HK576_HV512].pipeline; break; + default: + { + GGML_LOG_ERROR("unsupported type: %d\n", src1->type); + GGML_LOG_ERROR("add template specialization for this type\n"); 
+ GGML_ABORT("add template specialization for this type"); + } + } + } else { + GGML_LOG_ERROR("unsupported size: %lld\n", ne20); + GGML_LOG_ERROR("add template specialization for this size\n"); + GGML_ABORT("add template specialization for this size"); + } + } break; default: - { - GGML_LOG_ERROR("unsupported size: %lld\n", ne00); - GGML_LOG_ERROR("add template specialization for this size\n"); - GGML_ABORT("add template specialization for this size"); - } + { + GGML_LOG_ERROR("unsupported size: %lld\n", ne00); + GGML_LOG_ERROR("add template specialization for this size\n"); + GGML_ABORT("add template specialization for this size"); + } } } diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 0fb28238a8adb..dc7eab03ee8a2 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -3546,6 +3546,7 @@ template [[host_name("kernel_flash_attn_ext_f16_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_f16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 192, 192>; template [[host_name("kernel_flash_attn_ext_f16_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 192, 128>; template [[host_name("kernel_flash_attn_ext_f16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 256, 256>; +template [[host_name("kernel_flash_attn_ext_f16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 576, 512>; #if defined(GGML_METAL_USE_BF16) template [[host_name("kernel_flash_attn_ext_bf16_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 64, 64>; @@ -3556,6 +3557,7 @@ template [[host_name("kernel_flash_attn_ext_bf16_h128")]] 
kernel flash_at template [[host_name("kernel_flash_attn_ext_bf16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 192, 192>; template [[host_name("kernel_flash_attn_ext_bf16_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 192, 128>; template [[host_name("kernel_flash_attn_ext_bf16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 256, 256>; +template [[host_name("kernel_flash_attn_ext_bf16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 576, 512>; #endif template [[host_name("kernel_flash_attn_ext_q4_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 64, 64>; @@ -3566,6 +3568,7 @@ template [[host_name("kernel_flash_attn_ext_q4_0_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q4_0_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 192, 192>; template [[host_name("kernel_flash_attn_ext_q4_0_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 192, 128>; template [[host_name("kernel_flash_attn_ext_q4_0_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q4_0_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 576, 512>; template [[host_name("kernel_flash_attn_ext_q4_1_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 
2, dequantize_q4_1, 64, 64>; template [[host_name("kernel_flash_attn_ext_q4_1_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 80, 80>; @@ -3575,6 +3578,7 @@ template [[host_name("kernel_flash_attn_ext_q4_1_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q4_1_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 192, 192>; template [[host_name("kernel_flash_attn_ext_q4_1_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 192, 128>; template [[host_name("kernel_flash_attn_ext_q4_1_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q4_1_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 576, 512>; template [[host_name("kernel_flash_attn_ext_q5_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 64, 64>; template [[host_name("kernel_flash_attn_ext_q5_0_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 80, 80>; @@ -3584,6 +3588,7 @@ template [[host_name("kernel_flash_attn_ext_q5_0_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q5_0_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 192, 192>; template [[host_name("kernel_flash_attn_ext_q5_0_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 192, 128>; template [[host_name("kernel_flash_attn_ext_q5_0_h256")]] 
kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q5_0_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 576, 512>; template [[host_name("kernel_flash_attn_ext_q5_1_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 64, 64>; template [[host_name("kernel_flash_attn_ext_q5_1_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 80, 80>; @@ -3593,6 +3598,7 @@ template [[host_name("kernel_flash_attn_ext_q5_1_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q5_1_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 192, 192>; template [[host_name("kernel_flash_attn_ext_q5_1_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 192, 128>; template [[host_name("kernel_flash_attn_ext_q5_1_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q5_1_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 576, 512>; template [[host_name("kernel_flash_attn_ext_q8_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 64, 64>; template [[host_name("kernel_flash_attn_ext_q8_0_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 80, 80>; @@ -3602,6 +3608,7 @@ template 
[[host_name("kernel_flash_attn_ext_q8_0_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q8_0_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 192, 192>; template [[host_name("kernel_flash_attn_ext_q8_0_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 192, 128>; template [[host_name("kernel_flash_attn_ext_q8_0_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q8_0_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 576, 512>; #undef FA_TYPES @@ -4009,6 +4016,16 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_0_h256")]] kernel flash_attn_ template [[host_name("kernel_flash_attn_ext_vec_q5_1_h256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 256, 256, 4>; template [[host_name("kernel_flash_attn_ext_vec_q8_0_h256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 256, 256, 4>; +template [[host_name("kernel_flash_attn_ext_vec_f16_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 576, 512, 2>; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_flash_attn_ext_vec_bf16_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 576, 512, 2>; +#endif +template [[host_name("kernel_flash_attn_ext_vec_q4_0_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, 
dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 576, 512, 2>; +template [[host_name("kernel_flash_attn_ext_vec_q4_1_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 576, 512, 2>; +template [[host_name("kernel_flash_attn_ext_vec_q5_0_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 576, 512, 2>; +template [[host_name("kernel_flash_attn_ext_vec_q5_1_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 576, 512, 2>; +template [[host_name("kernel_flash_attn_ext_vec_q8_0_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 576, 512, 2>; + #undef FA_TYPES template<typename T> diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 0e9b2e8135a7a..2f6d03c939eb5 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -9261,6 +9261,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case 112: case 128: case 256: + case 576: // DeepSeek MLA break; default: return false; } diff --git a/src/llama-context.cpp b/src/llama-context.cpp index d3ef1cbdeb65e..983385f86d494 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -484,7 +484,7 @@ ggml_tensor * llama_context::build_rope_shift( // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly. // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. - const float yarn_attn_factor_scaled = model.arch == LLM_ARCH_DEEPSEEK2 ?
1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor; + const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor; ggml_tensor * tmp; @@ -504,14 +504,14 @@ ggml_tensor * llama_context::build_rope_shift( tmp = ggml_rope_ext_inplace(ctx0, tmp, shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - yarn_ext_factor, yarn_attn_factor_scaled, yarn_beta_fast, yarn_beta_slow); + yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow); tmp = ggml_cpy(ctx0, tmp, cur); } else { // we rotate only the first n_rot dimensions tmp = ggml_rope_ext_inplace(ctx0, cur, shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - yarn_ext_factor, yarn_attn_factor_scaled, yarn_beta_fast, yarn_beta_slow); + yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow); } return tmp; @@ -2278,11 +2278,6 @@ llama_context * llama_init_from_model( params.flash_attn = false; } - if (params.flash_attn && model->arch == LLM_ARCH_DEEPSEEK2) { - LLAMA_LOG_WARN("%s: flash_attn is not compatible with Deepseek2 - forcing off\n", __func__); - params.flash_attn = false; - } - if (ggml_is_quantized(params.type_v) && !params.flash_attn) { LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__); return nullptr; diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 5d0222b981058..a85e97288e1ae 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1200,9 +1200,6 @@ ggml_tensor * llm_graph_context::build_attn_mha( //const auto & n_embd_head_k = hparams.n_embd_head_k; //const auto & n_embd_head_v = hparams.n_embd_head_v; - // note: for MLA with the absorption optimization, the final embedding size will be changed via v_mla - const auto n_embd_head_v = v_mla == nullptr ? v_trans ? 
v->ne[1] : v->ne[0] : v_mla->ne[1]; - const auto n_tokens = q->ne[1]; const auto n_head = q->ne[2]; const auto n_kv = k->ne[1]; @@ -1231,7 +1228,12 @@ ggml_tensor * llm_graph_context::build_attn_mha( ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); - cur = ggml_reshape_2d(ctx0, cur, n_embd_head_v*n_head, n_tokens); + if (v_mla) { + cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens); + cur = ggml_mul_mat(ctx0, v_mla, cur); + } + + cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens); } else { ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); @@ -1274,9 +1276,9 @@ ggml_tensor * llm_graph_context::build_attn_mha( kqv = ggml_mul_mat(ctx0, v_mla, kqv); } - ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); + cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3); - cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_head_v*n_head, n_tokens); + cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens); if (!cparams.offload_kqv) { // all nodes between the KV store and the attention output are run on the CPU diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 248c61748eaa8..6b7bfecf3a1cf 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -10050,7 +10050,7 @@ struct llm_build_deepseek2 : public llm_graph_context { // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. 
const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale)); const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(n_embd_head_k)); - const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)); + const float attn_factor = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)); ggml_tensor * cur; ggml_tensor * inpL; @@ -10127,13 +10127,13 @@ struct llm_build_deepseek2 : public llm_graph_context { q_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor_scaled, beta_fast, beta_slow + ext_factor, attn_factor, beta_fast, beta_slow ); cb(q_pe, "q_pe", il); k_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor_scaled, beta_fast, beta_slow + ext_factor, attn_factor, beta_fast, beta_slow ); cb(k_pe, "k_pe", il); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 1ee742894695b..5f6f87d1a3a7b 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4428,10 +4428,11 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() { test_cases.emplace_back(new test_timestep_embedding()); test_cases.emplace_back(new test_leaky_relu()); - for (int hsk : { 64, 80, 128, 192, 256, }) { - for (int hsv : { 64, 80, 128, 192, 256, }) { - if (hsk != 192 && hsk != hsv) continue; + for (int hsk : { 64, 80, 128, 192, 256, 576 }) { + for (int hsv : { 64, 80, 128, 192, 256, 512 }) { + if (hsk != 192 && hsk != 576 && hsk != hsv) continue; if (hsk == 192 && (hsv != 128 && hsv != 192)) continue; + if (hsk == 576 && hsv != 512) continue; // DeepSeek MLA for (bool mask : { true, false } ) { for (float max_bias : { 0.0f, 8.0f }) {
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index bab85809ac068..a7febef723c2e 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -3237,6 +3237,10 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g if (op->src[0]->ne[0] == 192) { + // DeepSeek MLA if (op->src[0]->ne[3] != 1) { diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 16ca08383c7ed..85f3ae7bfdc31 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -354,6 +354,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H96, @@ -362,6 +363,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H96, @@ -370,6 +372,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H96, @@ -378,6 +381,7 @@ static void 
ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H96, @@ -386,6 +390,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H96, @@ -394,6 +399,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H80, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H96, @@ -402,6 +408,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, @@ -437,6 +444,13 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H256, 
GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H256, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_HK576_HV512, GGML_METAL_KERNEL_TYPE_SET_I32, GGML_METAL_KERNEL_TYPE_SET_F32, GGML_METAL_KERNEL_TYPE_CPY_F32_F32, @@ -1018,6 +1032,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H192, flash_attn_ext_f16_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK192_HV128, flash_attn_ext_f16_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, flash_attn_ext_f16_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK576_HV512, flash_attn_ext_f16_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H64, flash_attn_ext_bf16_h64, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H80, flash_attn_ext_bf16_h80, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H96, flash_attn_ext_bf16_h96, has_simdgroup_mm && use_bfloat); @@ -1026,6 +1041,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H192, flash_attn_ext_bf16_h192, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK192_HV128, flash_attn_ext_bf16_hk192_hv128, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H256, flash_attn_ext_bf16_h256, 
has_simdgroup_mm && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK576_HV512, flash_attn_ext_bf16_hk576_hv512, has_simdgroup_mm && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H64, flash_attn_ext_q4_0_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H80, flash_attn_ext_q4_0_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H96, flash_attn_ext_q4_0_h96, has_simdgroup_mm); @@ -1034,6 +1050,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H192, flash_attn_ext_q4_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK192_HV128, flash_attn_ext_q4_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H256, flash_attn_ext_q4_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK576_HV512, flash_attn_ext_q4_0_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H64, flash_attn_ext_q4_1_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H80, flash_attn_ext_q4_1_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H96, flash_attn_ext_q4_1_h96, has_simdgroup_mm); @@ -1042,6 +1059,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H192, flash_attn_ext_q4_1_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK192_HV128, flash_attn_ext_q4_1_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H256, flash_attn_ext_q4_1_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK576_HV512, flash_attn_ext_q4_1_hk576_hv512, has_simdgroup_mm); 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H64, flash_attn_ext_q5_0_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H80, flash_attn_ext_q5_0_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H96, flash_attn_ext_q5_0_h96, has_simdgroup_mm); @@ -1050,6 +1068,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H192, flash_attn_ext_q5_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK192_HV128, flash_attn_ext_q5_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H256, flash_attn_ext_q5_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK576_HV512, flash_attn_ext_q5_0_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H64, flash_attn_ext_q5_1_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H80, flash_attn_ext_q5_1_h80, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H96, flash_attn_ext_q5_1_h96, has_simdgroup_mm); @@ -1058,6 +1077,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H192, flash_attn_ext_q5_1_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK192_HV128, flash_attn_ext_q5_1_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H256, flash_attn_ext_q5_1_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK576_HV512, flash_attn_ext_q5_1_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H64, flash_attn_ext_q8_0_h64, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H80, flash_attn_ext_q8_0_h80, 
has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H96, flash_attn_ext_q8_0_h96, has_simdgroup_mm); @@ -1066,6 +1086,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, flash_attn_ext_q8_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, flash_attn_ext_q8_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, flash_attn_ext_q8_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512, flash_attn_ext_q8_0_hk576_hv512, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, flash_attn_ext_vec_f16_h96, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, flash_attn_ext_vec_bf16_h96, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, flash_attn_ext_vec_q4_0_h96, has_simdgroup_reduction); @@ -1101,6 +1122,13 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H256, flash_attn_ext_vec_q5_0_h256, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H256, flash_attn_ext_vec_q5_1_h256, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H256, flash_attn_ext_vec_q8_0_h256, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_HK576_HV512, flash_attn_ext_vec_f16_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_HK576_HV512, flash_attn_ext_vec_bf16_hk576_hv512, has_simdgroup_reduction && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_HK576_HV512, flash_attn_ext_vec_q4_0_hk576_hv512, has_simdgroup_reduction); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_HK576_HV512, flash_attn_ext_vec_q4_1_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_HK576_HV512, flash_attn_ext_vec_q5_0_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_HK576_HV512, flash_attn_ext_vec_q5_1_hk576_hv512, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_HK576_HV512, flash_attn_ext_vec_q8_0_hk576_hv512, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_F32, set_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_I32, set_i32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true); @@ -1365,6 +1393,11 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex // TODO: not sure if it is worth adding kernels for this size + // DeepSeek sizes + // TODO: disabled for now, until optmized if (op->src[1]->type != op->src[2]->type) { @@ -3857,12 +3890,14 @@ static void ggml_metal_encode_node( // TODO: add vec kernels for (ne00%64 == 0) and maybe also for (ne00%32 == 0) // for now avoiding mainly to keep the number of templates/kernels a bit lower // these are now trivial to add after: https://github.com/ggml-org/llama.cpp/pull/12612 - if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 96 && ne00 != 192)) { + if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 96 && ne00 != 192 && ne00 != 576)) { switch (src1->type) { case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK192_HV128].pipeline; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_HK576_HV512].pipeline; case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64 ].pipeline; break; @@ -3885,6 +3920,8 @@ static void ggml_metal_encode_node( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK192_HV128].pipeline; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_HK576_HV512].pipeline; case 64: pipeline =
ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_BF16_H64 ].pipeline; break; @@ -3907,6 +3944,8 @@ static void ggml_metal_encode_node( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK192_HV128].pipeline; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_HK576_HV512].pipeline; case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_0_H64 ].pipeline; break; @@ -3929,6 +3968,8 @@ static void ggml_metal_encode_node( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK192_HV128].pipeline; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_HK576_HV512].pipeline; case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q4_1_H64 ].pipeline; break; @@ -3951,6 +3992,8 @@ static void ggml_metal_encode_node( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK192_HV128].pipeline; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_HK576_HV512].pipeline; case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_0_H64 ].pipeline; break; @@ -3973,6 +4016,8 @@ static void ggml_metal_encode_node( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK192_HV128].pipeline; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_HK576_HV512].pipeline; case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q5_1_H64 ].pipeline; break; @@ -3995,6 +4040,8 @@ static void ggml_metal_encode_node( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128].pipeline; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512].pipeline; case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H64 ].pipeline; break; @@ -4114,12 +4161,36 @@ static void ggml_metal_encode_node( } } } break; + case 576: + if (ne20 == 512) { + switch (src1->type) { + case GGML_TYPE_F16: pipeline = 
ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_HK576_HV512].pipeline; break; + case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_HK576_HV512].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_HK576_HV512].pipeline; break; + case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_HK576_HV512].pipeline; break; + case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_HK576_HV512].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_HK576_HV512].pipeline; break; + default: + { + GGML_LOG_ERROR("unsupported type: %d\n", src1->type); + GGML_LOG_ERROR("add template specialization for this type\n"); + GGML_ABORT("add template specialization for this type"); + } + } + } else { + GGML_LOG_ERROR("unsupported size: %lld\n", ne20); + GGML_LOG_ERROR("add template specialization for this size\n"); + GGML_ABORT("add template specialization for this size"); + } + } break; default: - { - GGML_LOG_ERROR("unsupported size: %lld\n", ne00); - GGML_ABORT("add template specialization for this size"); - } + GGML_LOG_ERROR("unsupported size: %lld\n", ne00); + GGML_LOG_ERROR("add template specialization for this size\n"); + GGML_ABORT("add template specialization for this size"); } } diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 0fb28238a8adb..dc7eab03ee8a2 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -3546,6 +3546,7 @@ template [[host_name("kernel_flash_attn_ext_f16_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_f16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 192, 192>; template [[host_name("kernel_flash_attn_ext_f16_hk192_hv128")]] kernel 
flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 192, 128>; template [[host_name("kernel_flash_attn_ext_f16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 256, 256>; +template [[host_name("kernel_flash_attn_ext_f16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 576, 512>; #if defined(GGML_METAL_USE_BF16) template [[host_name("kernel_flash_attn_ext_bf16_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 64, 64>; @@ -3556,6 +3557,7 @@ template [[host_name("kernel_flash_attn_ext_bf16_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_bf16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 192, 192>; template [[host_name("kernel_flash_attn_ext_bf16_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 192, 128>; template [[host_name("kernel_flash_attn_ext_bf16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 256, 256>; +template [[host_name("kernel_flash_attn_ext_bf16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 576, 512>; #endif template [[host_name("kernel_flash_attn_ext_q4_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 64, 64>; @@ -3566,6 +3568,7 @@ template [[host_name("kernel_flash_attn_ext_q4_0_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q4_0_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, 
block_q4_0, 2, dequantize_q4_0, 192, 192>; template [[host_name("kernel_flash_attn_ext_q4_0_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 192, 128>; template [[host_name("kernel_flash_attn_ext_q4_0_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q4_0_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 576, 512>; template [[host_name("kernel_flash_attn_ext_q4_1_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 64, 64>; template [[host_name("kernel_flash_attn_ext_q4_1_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 80, 80>; @@ -3575,6 +3578,7 @@ template [[host_name("kernel_flash_attn_ext_q4_1_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q4_1_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 192, 192>; template [[host_name("kernel_flash_attn_ext_q4_1_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 192, 128>; template [[host_name("kernel_flash_attn_ext_q4_1_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 256, 256>; template [[host_name("kernel_flash_attn_ext_q5_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 64, 64>; template [[host_name("kernel_flash_attn_ext_q5_0_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, 
dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 80, 80>; @@ -3584,6 +3588,7 @@ template [[host_name("kernel_flash_attn_ext_q5_0_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q5_0_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 192, 192>; template [[host_name("kernel_flash_attn_ext_q5_0_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 192, 128>; template [[host_name("kernel_flash_attn_ext_q5_0_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 256, 256>; template [[host_name("kernel_flash_attn_ext_q5_1_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 64, 64>; template [[host_name("kernel_flash_attn_ext_q5_1_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 80, 80>; @@ -3593,6 +3598,7 @@ template [[host_name("kernel_flash_attn_ext_q5_1_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q5_1_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 192, 192>; template [[host_name("kernel_flash_attn_ext_q5_1_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 192, 128>; template [[host_name("kernel_flash_attn_ext_q5_1_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q5_1_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 576, 512>; template 
[[host_name("kernel_flash_attn_ext_q8_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 64, 64>; template [[host_name("kernel_flash_attn_ext_q8_0_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 80, 80>; @@ -3602,6 +3608,7 @@ template [[host_name("kernel_flash_attn_ext_q8_0_h128")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q8_0_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 192, 192>; template [[host_name("kernel_flash_attn_ext_q8_0_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 192, 128>; template [[host_name("kernel_flash_attn_ext_q8_0_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 256, 256>; +template [[host_name("kernel_flash_attn_ext_q8_0_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 576, 512>; @@ -4009,6 +4016,16 @@ template [[host_name("kernel_flash_attn_ext_vec_q5_0_h256")]] kernel flash_attn_ template [[host_name("kernel_flash_attn_ext_vec_q5_1_h256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 256, 256, 4>; template [[host_name("kernel_flash_attn_ext_vec_q8_0_h256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 256, 256, 4>; +template [[host_name("kernel_flash_attn_ext_vec_f16_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 576, 512, 2>; +#if defined(GGML_METAL_USE_BF16) 
+template [[host_name("kernel_flash_attn_ext_vec_bf16_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 576, 512, 2>; +#endif +template [[host_name("kernel_flash_attn_ext_vec_q4_1_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 576, 512, 2>; +template [[host_name("kernel_flash_attn_ext_vec_q5_0_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 576, 512, 2>; +template [[host_name("kernel_flash_attn_ext_vec_q5_1_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 576, 512, 2>; +template [[host_name("kernel_flash_attn_ext_vec_q8_0_hk576_hv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 576, 512, 2>; template<typename T> diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 0e9b2e8135a7a..2f6d03c939eb5 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -9261,6 +9261,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case 112: case 128: case 256: break; default: return false; diff --git a/src/llama-context.cpp b/src/llama-context.cpp index d3ef1cbdeb65e..983385f86d494 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -484,7 +484,7 @@ ggml_tensor * llama_context::build_rope_shift( // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly. // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. - const float yarn_attn_factor_scaled = model.arch == LLM_ARCH_DEEPSEEK2 ? 
1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor; + const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor; ggml_tensor * tmp; @@ -504,14 +504,14 @@ ggml_tensor * llama_context::build_rope_shift( tmp = ggml_rope_ext_inplace(ctx0, tmp, tmp = ggml_cpy(ctx0, tmp, cur); // we rotate only the first n_rot dimensions tmp = ggml_rope_ext_inplace(ctx0, cur, return tmp; @@ -2278,11 +2278,6 @@ llama_context * llama_init_from_model( params.flash_attn = false; - if (params.flash_attn && model->arch == LLM_ARCH_DEEPSEEK2) { - LLAMA_LOG_WARN("%s: flash_attn is not compatible with Deepseek2 - forcing off\n", __func__); - params.flash_attn = false; - } if (ggml_is_quantized(params.type_v) && !params.flash_attn) { LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__); return nullptr; diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index 5d0222b981058..a85e97288e1ae 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1200,9 +1200,6 @@ ggml_tensor * llm_graph_context::build_attn_mha( //const auto & n_embd_head_k = hparams.n_embd_head_k; //const auto & n_embd_head_v = hparams.n_embd_head_v; - // note: for MLA with the absorption optimization, the final embedding size will be changed via v_mla - const auto n_embd_head_v = v_mla == nullptr ? v_trans ? 
v->ne[1] : v->ne[0] : v_mla->ne[1]; const auto n_tokens = q->ne[1]; const auto n_head = q->ne[2]; const auto n_kv = k->ne[1]; @@ -1231,7 +1228,12 @@ ggml_tensor * llm_graph_context::build_attn_mha( ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); + if (v_mla) { + cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens); + cur = ggml_mul_mat(ctx0, v_mla, cur); + } + cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens); ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); @@ -1274,9 +1276,9 @@ ggml_tensor * llm_graph_context::build_attn_mha( kqv = ggml_mul_mat(ctx0, v_mla, kqv); } - ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3); + cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3); + cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens); if (!cparams.offload_kqv) { // all nodes between the KV store and the attention output are run on the CPU diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 248c61748eaa8..6b7bfecf3a1cf 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -10050,7 +10050,7 @@ struct llm_build_deepseek2 : public llm_graph_context { // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. 
const float mscale = attn_factor * (1.0f + hparams.rope_yarn_log_mul * logf(1.0f / freq_scale)); const float kq_scale = 1.0f*mscale*mscale/sqrtf(float(n_embd_head_k)); + const float attn_factor = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)); ggml_tensor * cur; ggml_tensor * inpL; @@ -10127,13 +10127,13 @@ struct llm_build_deepseek2 : public llm_graph_context { q_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr, cb(q_pe, "q_pe", il); k_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr, cb(k_pe, "k_pe", il); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 1ee742894695b..5f6f87d1a3a7b 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4428,10 +4428,11 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() { test_cases.emplace_back(new test_timestep_embedding()); test_cases.emplace_back(new test_leaky_relu()); - for (int hsk : { 64, 80, 128, 192, 256, }) { - for (int hsv : { 64, 80, 128, 192, 256, }) { + for (int hsk : { 64, 80, 128, 192, 256, 576 }) { + if (hsk != 192 && hsk != 576 && hsk != hsv) continue; if (hsk == 192 && (hsv != 128 && hsv != 192)) continue; + if (hsk == 576 && hsv != 512) continue; // DeepSeek MLA for (bool mask : { true, false } ) { for (float max_bias : { 0.0f, 8.0f }) {
[ "+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_HK576_HV512, flash_attn_ext_vec_q5_0_hk576_hv512, has_simdgroup_reduction);", "+ case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_HK576_HV512].pipeline; break;", "- GGML_LOG_ERROR(\"add template specialization for this size\\n\");", "+ }", "+template [[host_name(\"kernel_flash_attn_ext_q4_1_hk576_hv512\")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 576, 512>;", "+template [[host_name(\"kernel_flash_attn_ext_q5_0_hk576_hv512\")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 576, 512>;", "+template [[host_name(\"kernel_flash_attn_ext_vec_q4_0_hk576_hv512\")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 576, 512, 2>;", "+ case 575: // DeepSeek MLA", "- cur = ggml_reshape_2d(ctx0, cur, n_embd_head_v*n_head, n_tokens);", "- cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_head_v*n_head, n_tokens);", "- const float attn_factor_scaled = 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale));", "- if (hsk != 192 && hsk != hsv) continue;", "+ for (int hsv : { 64, 80, 128, 192, 256, 512 }) {" ]
[ 153, 252, 272, 279, 315, 323, 351, 368, 432, 449, 462, 493, 495 ]
{ "additions": 117, "author": "ggerganov", "deletions": 26, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12953", "issue_id": 12953, "merged_at": "2025-04-17T15:16:36Z", "omission_probability": 0.1, "pr_number": 12953, "repo": "ggml-org/llama.cpp", "title": "graph : make FA compatible with MLA + add initial Metal kernels", "total_changes": 143 }
812
diff --git a/examples/rpc/README.md b/examples/rpc/README.md index 312bb634dc920..561f19fda6b06 100644 --- a/examples/rpc/README.md +++ b/examples/rpc/README.md @@ -72,3 +72,14 @@ $ bin/llama-cli -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name This way you can offload model layers to both local and remote devices. +### Local cache + +The RPC server can use a local cache to store large tensors and avoid transferring them over the network. +This can speed up model loading significantly, especially when using large models. +To enable the cache, use the `-c` option: + +```bash +$ bin/rpc-server -c +``` + +By default, the cache is stored in the `$HOME/.cache/llama.cpp/rpc` directory and can be controlled via the `LLAMA_CACHE` environment variable.
diff --git a/examples/rpc/README.md b/examples/rpc/README.md index 312bb634dc920..561f19fda6b06 100644 --- a/examples/rpc/README.md +++ b/examples/rpc/README.md @@ -72,3 +72,14 @@ $ bin/llama-cli -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name This way you can offload model layers to both local and remote devices. +### Local cache +The RPC server can use a local cache to store large tensors and avoid transferring them over the network. +This can speed up model loading significantly, especially when using large models. +To enable the cache, use the `-c` option: +```bash +$ bin/rpc-server -c +``` +By default, the cache is stored in the `$HOME/.cache/llama.cpp/rpc` directory and can be controlled via the `LLAMA_CACHE` environment variable.
[]
[]
{ "additions": 11, "author": "rgerganov", "deletions": 0, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12620", "issue_id": 12620, "merged_at": "2025-03-28T07:44:13Z", "omission_probability": 0.1, "pr_number": 12620, "repo": "ggml-org/llama.cpp", "title": "rpc : update README for cache usage", "total_changes": 11 }
813
diff --git a/ggml/src/ggml-cuda/cpy.cu b/ggml/src/ggml-cuda/cpy.cu index 4f4faa3e63ae7..ed25646e8e261 100644 --- a/ggml/src/ggml-cuda/cpy.cu +++ b/ggml/src/ggml-cuda/cpy.cu @@ -551,7 +551,7 @@ static void ggml_cpy_f16_f16_cuda( (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); } -void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1) { +void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection_for_this_node) { const int64_t ne = ggml_nelements(src0); GGML_ASSERT(ne == ggml_nelements(src1)); @@ -588,7 +588,7 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg char ** dest_ptrs_d = nullptr; int graph_cpynode_index = -1; #if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) - if(ctx.cuda_graph->use_cpy_indirection) { + if(ctx.cuda_graph->use_cpy_indirection && !disable_indirection_for_this_node) { dest_ptrs_d = ctx.cuda_graph->dest_ptrs_d; graph_cpynode_index = ctx.cuda_graph->graph_cpynode_index; } @@ -636,7 +636,7 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg ggml_type_name(src0->type), ggml_type_name(src1->type)); } #if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) - if(ctx.cuda_graph->use_cpy_indirection) { + if(ctx.cuda_graph->use_cpy_indirection && !disable_indirection_for_this_node) { ctx.cuda_graph->graph_cpynode_index = graph_cpynode_index; } #endif @@ -645,7 +645,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; - ggml_cuda_cpy(ctx, src0, dst); + bool disable_indirection = true; + ggml_cuda_cpy(ctx, src0, dst, disable_indirection); } void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { diff --git 
a/ggml/src/ggml-cuda/cpy.cuh b/ggml/src/ggml-cuda/cpy.cuh index 6bed0564df27a..0bd3c0c6f8c27 100644 --- a/ggml/src/ggml-cuda/cpy.cuh +++ b/ggml/src/ggml-cuda/cpy.cuh @@ -2,7 +2,7 @@ #define CUDA_CPY_BLOCK_SIZE 64 -void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1); +void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection = false); void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 9ced466512788..bab85809ac068 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2489,7 +2489,7 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud #endif } - if (node->op == GGML_OP_MUL_MAT_ID || node->op == GGML_OP_CONT || node->op == GGML_OP_DUP) { + if (node->op == GGML_OP_MUL_MAT_ID) { use_cuda_graph = false; // This node type is not supported by CUDA graph capture #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to unsupported node type\n", __func__);
diff --git a/ggml/src/ggml-cuda/cpy.cu b/ggml/src/ggml-cuda/cpy.cu index 4f4faa3e63ae7..ed25646e8e261 100644 --- a/ggml/src/ggml-cuda/cpy.cu +++ b/ggml/src/ggml-cuda/cpy.cu @@ -551,7 +551,7 @@ static void ggml_cpy_f16_f16_cuda( (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); -void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1) { +void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection_for_this_node) { const int64_t ne = ggml_nelements(src0); GGML_ASSERT(ne == ggml_nelements(src1)); @@ -588,7 +588,7 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg char ** dest_ptrs_d = nullptr; int graph_cpynode_index = -1; dest_ptrs_d = ctx.cuda_graph->dest_ptrs_d; graph_cpynode_index = ctx.cuda_graph->graph_cpynode_index; @@ -636,7 +636,7 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg ggml_type_name(src0->type), ggml_type_name(src1->type)); ctx.cuda_graph->graph_cpynode_index = graph_cpynode_index; @@ -645,7 +645,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; - ggml_cuda_cpy(ctx, src0, dst); + bool disable_indirection = true; + ggml_cuda_cpy(ctx, src0, dst, disable_indirection); void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { diff --git a/ggml/src/ggml-cuda/cpy.cuh b/ggml/src/ggml-cuda/cpy.cuh index 6bed0564df27a..0bd3c0c6f8c27 100644 --- a/ggml/src/ggml-cuda/cpy.cuh +++ b/ggml/src/ggml-cuda/cpy.cuh @@ -2,7 +2,7 @@ #define CUDA_CPY_BLOCK_SIZE 64 +void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection = false); void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor 
* dst); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 9ced466512788..bab85809ac068 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2489,7 +2489,7 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud } - if (node->op == GGML_OP_MUL_MAT_ID || node->op == GGML_OP_CONT || node->op == GGML_OP_DUP) { + if (node->op == GGML_OP_MUL_MAT_ID) { use_cuda_graph = false; // This node type is not supported by CUDA graph capture #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to unsupported node type\n", __func__);
[ "-void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1);" ]
[ 49 ]
{ "additions": 7, "author": "agray3", "deletions": 6, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12970", "issue_id": 12970, "merged_at": "2025-04-17T13:19:42Z", "omission_probability": 0.1, "pr_number": 12970, "repo": "ggml-org/llama.cpp", "title": "ggml: Re-enable CUDA graphs in presence of CONT and DUP nodes", "total_changes": 13 }
814
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 783a0ff86c1c1..0e9b2e8135a7a 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -5531,7 +5531,7 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx uint32_t workgroups_y = (uint32_t)neq2; uint32_t workgroups_z = (uint32_t)neq3; - if (N == 1 && qk_ratio > 1 && is_pow2(qk_ratio) && gqa_ratio <= flash_attention_num_small_rows && + if (N == 1 && qk_ratio > 1 && gqa_ratio <= flash_attention_num_small_rows && qk_ratio * nek2 == neq2 && nek2 == nev2 && neq3 == 1 && nek3 == 1 && nev3 == 1) { // grouped query attention - make the N dimension equal to gqa_ratio, reduce // workgroups proportionally in y dimension. The shader will detect gqa_ratio > 1 @@ -5544,8 +5544,8 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx uint32_t split_kv = KV; uint32_t split_k = 1; - if (gqa_ratio > 1 && ctx->device->shader_core_count > 0) { - GGML_ASSERT(workgroups_x == 1); + // Try to use split_k when KV is large enough to be worth the overhead + if (workgroups_x == 1 && ctx->device->shader_core_count > 0 && KV >= 512) { // Try to run two workgroups per SM. split_k = ctx->device->shader_core_count * 2 / workgroups_y; if (split_k > 1) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index e1baa85f9e330..b926a578aded6 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -131,7 +131,7 @@ ACC_TYPE perElemOpStoreCol0(const in uint32_t r, const in uint32_t c, const in A // Load the slope matrix, indexed by Q's dimension 2. 
ACC_TYPE perElemOpComputeSlope(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t iq2) { - const uint32_t h = iq2 + (r & (p.gqa_ratio - 1)); + const uint32_t h = iq2 + (r % p.gqa_ratio); const ACC_TYPE base = ACC_TYPE(h < p.n_head_log2 ? p.m0 : p.m1); const int exph = int(h < p.n_head_log2 ? h + 1 : 2*(h - p.n_head_log2) + 1); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 3a5741c8d959d..1ee742894695b 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4532,7 +4532,9 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() { for (int kv : { 4096, 8192, 16384, }) { for (int hs : { 64, 128, }) { - test_cases.emplace_back(new test_flash_attn_ext(hs, hs, 8, 4, kv, 1, true, 0, 0, GGML_PREC_F32, GGML_TYPE_F16)); + for (int nr : { 1, 4, }) { + test_cases.emplace_back(new test_flash_attn_ext(hs, hs, 8, nr, kv, 1, true, 0, 0, GGML_PREC_F32, GGML_TYPE_F16)); + } } }
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 783a0ff86c1c1..0e9b2e8135a7a 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -5531,7 +5531,7 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx uint32_t workgroups_y = (uint32_t)neq2; uint32_t workgroups_z = (uint32_t)neq3; - if (N == 1 && qk_ratio > 1 && is_pow2(qk_ratio) && gqa_ratio <= flash_attention_num_small_rows && + if (N == 1 && qk_ratio > 1 && gqa_ratio <= flash_attention_num_small_rows && qk_ratio * nek2 == neq2 && nek2 == nev2 && neq3 == 1 && nek3 == 1 && nev3 == 1) { // grouped query attention - make the N dimension equal to gqa_ratio, reduce // workgroups proportionally in y dimension. The shader will detect gqa_ratio > 1 @@ -5544,8 +5544,8 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx uint32_t split_kv = KV; uint32_t split_k = 1; - if (gqa_ratio > 1 && ctx->device->shader_core_count > 0) { - GGML_ASSERT(workgroups_x == 1); + // Try to use split_k when KV is large enough to be worth the overhead + if (workgroups_x == 1 && ctx->device->shader_core_count > 0 && KV >= 512) { // Try to run two workgroups per SM. split_k = ctx->device->shader_core_count * 2 / workgroups_y; if (split_k > 1) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index e1baa85f9e330..b926a578aded6 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -131,7 +131,7 @@ ACC_TYPE perElemOpStoreCol0(const in uint32_t r, const in uint32_t c, const in A // Load the slope matrix, indexed by Q's dimension 2. 
ACC_TYPE perElemOpComputeSlope(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem, const in uint32_t iq2) { - const uint32_t h = iq2 + (r & (p.gqa_ratio - 1)); + const uint32_t h = iq2 + (r % p.gqa_ratio); const ACC_TYPE base = ACC_TYPE(h < p.n_head_log2 ? p.m0 : p.m1); const int exph = int(h < p.n_head_log2 ? h + 1 : 2*(h - p.n_head_log2) + 1); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 3a5741c8d959d..1ee742894695b 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4532,7 +4532,9 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() { for (int kv : { 4096, 8192, 16384, }) { for (int hs : { 64, 128, }) { - test_cases.emplace_back(new test_flash_attn_ext(hs, hs, 8, 4, kv, 1, true, 0, 0, GGML_PREC_F32, GGML_TYPE_F16)); + for (int nr : { 1, 4, }) { + test_cases.emplace_back(new test_flash_attn_ext(hs, hs, 8, nr, kv, 1, true, 0, 0, GGML_PREC_F32, GGML_TYPE_F16)); } }
[ "+ }" ]
[ 48 ]
{ "additions": 7, "author": "jeffbolznv", "deletions": 5, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12931", "issue_id": 12931, "merged_at": "2025-04-16T18:37:25Z", "omission_probability": 0.1, "pr_number": 12931, "repo": "ggml-org/llama.cpp", "title": "vulkan: enable coopmat2 FA gqa and split_k optimizations more often", "total_changes": 12 }
815
diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index 2c5cdcae32cc8..2c6737ea8cf3f 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -625,6 +625,10 @@ static void ggml_cann_avg_pool2d(ggml_backend_cann_context& ctx, bool count_include_pad = true; int64_t divisor_override = 0; int8_t cube_math_type = 0; +#ifdef ASCEND_310P + cube_math_type = 1; +#endif + GGML_CANN_CALL_ACLNN_OP(AvgPool2d, acl_src, kernel_size, strides, paddings_avg, ceil_mode, count_include_pad, divisor_override, cube_math_type, acl_dst); @@ -2590,6 +2594,10 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds int64_t groups = 1; int8_t cubeMathType = 0; +#ifdef ASCEND_310P + cubeMathType = 1; +#endif + GGML_CANN_CALL_ACLNN_OP(Convolution, acl_input, acl_weight, nullptr, stride, padding, dilation, transposed, padding, groups, acl_dst, cubeMathType); diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index 08b9ca301c617..ca41e02607091 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -2022,6 +2022,10 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, return true; case GGML_TYPE_Q8_0: case GGML_TYPE_Q4_0: +#ifdef ASCEND_310P + // Q4 && Q8 per group is not suppor on 310p device + return false; +#endif // only support contiguous for quantized types. return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); @@ -2107,6 +2111,12 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, } case GGML_OP_POOL_2D: { const int32_t * opts = (const int32_t *) op->op_params; +#ifdef ASCEND_310P + enum ggml_op_pool opt = static_cast<ggml_op_pool>(opts[0]); + if(opt == GGML_OP_POOL_MAX){ + return false; + } +#endif const int k0 = opts[1]; const int k1 = opts[2]; const int p0 = opts[5];
diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index 2c5cdcae32cc8..2c6737ea8cf3f 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -625,6 +625,10 @@ static void ggml_cann_avg_pool2d(ggml_backend_cann_context& ctx, bool count_include_pad = true; int64_t divisor_override = 0; int8_t cube_math_type = 0; + cube_math_type = 1; GGML_CANN_CALL_ACLNN_OP(AvgPool2d, acl_src, kernel_size, strides, paddings_avg, ceil_mode, count_include_pad, divisor_override, cube_math_type, acl_dst); @@ -2590,6 +2594,10 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds int64_t groups = 1; int8_t cubeMathType = 0; + cubeMathType = 1; GGML_CANN_CALL_ACLNN_OP(Convolution, acl_input, acl_weight, nullptr, stride, padding, dilation, transposed, padding, groups, acl_dst, cubeMathType); diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index 08b9ca301c617..ca41e02607091 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -2022,6 +2022,10 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, return true; case GGML_TYPE_Q8_0: case GGML_TYPE_Q4_0: + // Q4 && Q8 per group is not suppor on 310p device + return false; // only support contiguous for quantized types. return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); @@ -2107,6 +2111,12 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, } case GGML_OP_POOL_2D: { const int32_t * opts = (const int32_t *) op->op_params; + enum ggml_op_pool opt = static_cast<ggml_op_pool>(opts[0]); + if(opt == GGML_OP_POOL_MAX){ + return false; + } const int k0 = opts[1]; const int k1 = opts[2]; const int p0 = opts[5];
[]
[]
{ "additions": 18, "author": "noemotiovon", "deletions": 0, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12962", "issue_id": 12962, "merged_at": "2025-04-16T08:21:05Z", "omission_probability": 0.1, "pr_number": 12962, "repo": "ggml-org/llama.cpp", "title": "[CANN]310P OPT Support", "total_changes": 18 }
816
diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 09800d3403fe1..4d2fda0bfa6ae 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4009,10 +4009,8 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_ROPE: { const int mode = ((const int32_t *) op->op_params)[2]; - if (mode & GGML_ROPE_TYPE_MROPE) { - return false; - } - if (mode & GGML_ROPE_TYPE_VISION) { + // mode is not used as a bitmask in practice, the various rope type modes are independent implementations + if (mode == GGML_ROPE_TYPE_MROPE) { return false; } return ggml_is_contiguous(op->src[0]); diff --git a/ggml/src/ggml-sycl/rope.cpp b/ggml/src/ggml-sycl/rope.cpp index bbcb356e97992..80e050f241496 100644 --- a/ggml/src/ggml-sycl/rope.cpp +++ b/ggml/src/ggml-sycl/rope.cpp @@ -1,9 +1,15 @@ #include "rope.hpp" +#include "ggml-sycl/common.hpp" +#include "ggml.h" struct rope_corr_dims { float v[2]; }; +struct mrope_sections { + int v[4]; +}; + static float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / sycl::max(0.001f, high - low); return 1.0f - sycl::min(1.0f, sycl::max(0.0f, y)); @@ -114,6 +120,48 @@ static void rope_neox( dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta; } +template <typename T, bool has_ff> +static void rope_vision(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale, + const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, + const float theta_scale, const float * freq_factors, const mrope_sections sections, + const sycl::nd_item<3> & item_ct1) { + // get index pos + const int i0 = 2 * (item_ct1.get_group(1) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1)); + if (i0 >= ne0) { + return; + } + const int row_dst = (item_ct1.get_group(2) * item_ct1.get_local_range(2)) + 
item_ct1.get_local_id(2); + const int row_x = row_dst % ne1; + const int channel_x = row_dst / ne1; + const int idst = (row_dst * ne0) + (i0 / 2); + const size_t ix = ((size_t) channel_x * s2) + ((size_t) row_x * s1) + (i0 / 2); + + const int sect_dims = sections.v[0] + sections.v[1]; + const int sector = (i0 / 2) % sect_dims; + + float theta_base = 0.0f; + if (sector < sections.v[0]) { + const int p = sector; + theta_base = pos[channel_x] * sycl::pow(theta_scale, (float) p); + } else { + // Simplified from CUDA backend code: if (sector >= sections.v[0] && sector < sec_w) which is just sector >= sections.v[0] + const int p = sector - sections.v[0]; + theta_base = pos[channel_x + ne2] * sycl::pow(theta_scale, (float) p); + } + + const float freq_factor = has_ff ? freq_factors[i0 / 2] : 1.0f; + float cos_theta; + float sin_theta; + rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); + const float x0 = x[ix + 0]; + const float x1 = x[ix + n_dims]; + + // store results in dst + dst[idst + 0] = x0 * cos_theta - x1 * sin_theta; + dst[idst + n_dims] = x0 * sin_theta + x1 * cos_theta; +} + template <typename T> static void rope_norm_sycl( const T *x, T *dst, int ne0, int n_dims, int nr, const int32_t *pos, float freq_scale, int p_delta_rows, @@ -192,21 +240,58 @@ static void rope_neox_sycl( } } +// rope vision +template <typename T> +static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int nr, const int32_t * pos, + const float freq_scale, const float freq_base, const float ext_factor, + const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, + const mrope_sections sections, queue_ptr stream) { + GGML_ASSERT(ne0 % 2 == 0); + const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); + const int n_blocks_y = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const 
sycl::range<3> grid_dims(1, n_blocks_y, nr); + const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims); + + const float theta_scale = std::pow(freq_base, -2.0f / n_dims); + // Add FP16 capability check if T could be sycl::half + if constexpr (std::is_same_v<T, sycl::half>) { + dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); + } + // launch kernel + if (freq_factors == nullptr) { + stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + rope_vision<T, false>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); + }); + } else { + stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) { + rope_vision<T, true>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); + }); + } +} + void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(dst->src[0]->type == dst->type); - - const int64_t ne00 = dst->src[0]->ne[0]; - const int64_t ne01 = dst->src[0]->ne[1]; + const int64_t ne00 = dst->src[0]->ne[0]; // head dims + const int64_t ne01 = dst->src[0]->ne[1]; // num heads + const int64_t ne02 = dst->src[0]->ne[2]; // num heads const int64_t nr = ggml_nrows(dst->src[0]); + const size_t s01 = dst->src[0]->nb[1] / ggml_type_size(dst->src[0]->type); + const size_t s02 = dst->src[0]->nb[2] / ggml_type_size(dst->src[0]->type); + + //const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; //const int n_ctx = ((int32_t *) dst->op_params)[3]; const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; + mrope_sections sections; // RoPE alteration for extended context float 
freq_base; @@ -222,8 +307,10 @@ void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); + memcpy(&sections.v, (int32_t *) dst->op_params + 11, sizeof(int)*4); const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; + const bool is_vision = mode == GGML_ROPE_TYPE_VISION; const int32_t * pos = (const int32_t *) dst->src[1]->data; @@ -240,6 +327,7 @@ void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { // compute if (is_neox) { + GGML_SYCL_DEBUG("%s: neox path\n", __func__); if (dst->src[0]->type == GGML_TYPE_F32) { rope_neox_sycl( (const float *)dst->src[0]->data, (float *)dst->data, ne00, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor, @@ -253,7 +341,19 @@ void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { } else { GGML_ABORT("fatal error"); } + } else if (is_vision) { + GGML_SYCL_DEBUG("%s: vision path\n", __func__); + if (dst->src[0]->type == GGML_TYPE_F16) { + rope_vision_sycl((const sycl::half *)dst->src[0]->data, (sycl::half *)dst->data, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, + freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, main_stream); + } else if (dst->src[0]->type == GGML_TYPE_F32) { + rope_vision_sycl((const float *) dst->src[0]->data, (float *)dst->data, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, + freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, main_stream); + } else { + GGML_ABORT("Fatal error: Tensor type unsupported!"); + } } else { + GGML_SYCL_DEBUG("%s: norm path\n", __func__); if (dst->src[0]->type == GGML_TYPE_F32) { rope_norm_sycl( (const float *)dst->src[0]->data, (float *)dst->data, ne00, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor,
diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 09800d3403fe1..4d2fda0bfa6ae 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4009,10 +4009,8 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_ROPE: { const int mode = ((const int32_t *) op->op_params)[2]; - if (mode & GGML_ROPE_TYPE_MROPE) { - return false; - if (mode & GGML_ROPE_TYPE_VISION) { + // mode is not used as a bitmask in practice, the various rope type modes are independent implementations + if (mode == GGML_ROPE_TYPE_MROPE) { return false; } return ggml_is_contiguous(op->src[0]); diff --git a/ggml/src/ggml-sycl/rope.cpp b/ggml/src/ggml-sycl/rope.cpp index bbcb356e97992..80e050f241496 100644 --- a/ggml/src/ggml-sycl/rope.cpp +++ b/ggml/src/ggml-sycl/rope.cpp @@ -1,9 +1,15 @@ #include "rope.hpp" +#include "ggml-sycl/common.hpp" +#include "ggml.h" struct rope_corr_dims { float v[2]; }; +struct mrope_sections { + int v[4]; +}; static float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / sycl::max(0.001f, high - low); return 1.0f - sycl::min(1.0f, sycl::max(0.0f, y)); @@ -114,6 +120,48 @@ static void rope_neox( dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta; +template <typename T, bool has_ff> +static void rope_vision(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale, + const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims, + const float theta_scale, const float * freq_factors, const mrope_sections sections, + const sycl::nd_item<3> & item_ct1) { + // get index pos + const int i0 = 2 * (item_ct1.get_group(1) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1)); + if (i0 >= ne0) { + return; + const int row_x = row_dst % ne1; + const int channel_x = row_dst / ne1; + const int idst = (row_dst * 
ne0) + (i0 / 2); + const size_t ix = ((size_t) channel_x * s2) + ((size_t) row_x * s1) + (i0 / 2); + const int sect_dims = sections.v[0] + sections.v[1]; + const int sector = (i0 / 2) % sect_dims; + if (sector < sections.v[0]) { + const int p = sector; + theta_base = pos[channel_x] * sycl::pow(theta_scale, (float) p); + // Simplified from CUDA backend code: if (sector >= sections.v[0] && sector < sec_w) which is just sector >= sections.v[0] + const int p = sector - sections.v[0]; + theta_base = pos[channel_x + ne2] * sycl::pow(theta_scale, (float) p); + const float freq_factor = has_ff ? freq_factors[i0 / 2] : 1.0f; + float cos_theta; + float sin_theta; + const float x0 = x[ix + 0]; + // store results in dst + dst[idst + 0] = x0 * cos_theta - x1 * sin_theta; + dst[idst + n_dims] = x0 * sin_theta + x1 * cos_theta; template <typename T> static void rope_norm_sycl( const T *x, T *dst, int ne0, int n_dims, int nr, const int32_t *pos, float freq_scale, int p_delta_rows, @@ -192,21 +240,58 @@ static void rope_neox_sycl( } +// rope vision +template <typename T> +static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1, + const size_t s2, const int n_dims, const int nr, const int32_t * pos, + const float freq_scale, const float freq_base, const float ext_factor, + const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors, + const mrope_sections sections, queue_ptr stream) { + GGML_ASSERT(ne0 % 2 == 0); + const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1); + const int n_blocks_y = (ne0 + 2 * SYCL_ROPE_BLOCK_SIZE - 1) / (2 * SYCL_ROPE_BLOCK_SIZE); + const sycl::range<3> grid_dims(1, n_blocks_y, nr); + const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims); + const float theta_scale = std::pow(freq_base, -2.0f / n_dims); + // Add FP16 capability check if T could be sycl::half + if constexpr (std::is_same_v<T, sycl::half>) { + 
dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 }); + // launch kernel + if (freq_factors == nullptr) { + rope_vision<T, false>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); + rope_vision<T, true>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, + corr_dims, theta_scale, freq_factors, sections, item_ct1); void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16); GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(dst->src[0]->type == dst->type); - - const int64_t ne00 = dst->src[0]->ne[0]; - const int64_t ne01 = dst->src[0]->ne[1]; + const int64_t ne00 = dst->src[0]->ne[0]; // head dims + const int64_t ne01 = dst->src[0]->ne[1]; // num heads + const int64_t ne02 = dst->src[0]->ne[2]; // num heads const int64_t nr = ggml_nrows(dst->src[0]); + const size_t s01 = dst->src[0]->nb[1] / ggml_type_size(dst->src[0]->type); + const size_t s02 = dst->src[0]->nb[2] / ggml_type_size(dst->src[0]->type); //const int n_past = ((int32_t *) dst->op_params)[0]; const int n_dims = ((int32_t *) dst->op_params)[1]; const int mode = ((int32_t *) dst->op_params)[2]; //const int n_ctx = ((int32_t *) dst->op_params)[3]; const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; + mrope_sections sections; // RoPE alteration for extended context float freq_base; @@ -222,8 +307,10 @@ void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); + memcpy(&sections.v, (int32_t *) dst->op_params + 11, sizeof(int)*4); const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; + const bool is_vision = mode == 
GGML_ROPE_TYPE_VISION; const int32_t * pos = (const int32_t *) dst->src[1]->data; @@ -240,6 +327,7 @@ void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { // compute if (is_neox) { + GGML_SYCL_DEBUG("%s: neox path\n", __func__); rope_neox_sycl( @@ -253,7 +341,19 @@ void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { } else { GGML_ABORT("fatal error"); } + GGML_SYCL_DEBUG("%s: vision path\n", __func__); + if (dst->src[0]->type == GGML_TYPE_F16) { + rope_vision_sycl((const sycl::half *)dst->src[0]->data, (sycl::half *)dst->data, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, + } else if (dst->src[0]->type == GGML_TYPE_F32) { + rope_vision_sycl((const float *) dst->src[0]->data, (float *)dst->data, ne00, ne01, ne02, s01, s02, n_dims, nr, pos, freq_scale, + } else { + GGML_ABORT("Fatal error: Tensor type unsupported!"); + } } else { + GGML_SYCL_DEBUG("%s: norm path\n", __func__); rope_norm_sycl(
[ "- }", "+ const int row_dst = (item_ct1.get_group(2) * item_ct1.get_local_range(2)) + item_ct1.get_local_id(2);", "+ float theta_base = 0.0f;", "+ rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);", "+ const float x1 = x[ix + n_dims];", "+ } else if (is_vision) {" ]
[ 10, 52, 61, 74, 76, 171 ]
{ "additions": 105, "author": "qnixsynapse", "deletions": 7, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12887", "issue_id": 12887, "merged_at": "2025-04-15T08:37:43Z", "omission_probability": 0.1, "pr_number": 12887, "repo": "ggml-org/llama.cpp", "title": "SYCL: Add ROPE vision kernel", "total_changes": 112 }
817
diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 9f1c6c6ccc09f..16ca08383c7ed 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -402,6 +402,13 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H128, @@ -1059,6 +1066,13 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, flash_attn_ext_q8_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, flash_attn_ext_q8_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, flash_attn_ext_q8_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, flash_attn_ext_vec_f16_h96, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, flash_attn_ext_vec_bf16_h96, has_simdgroup_reduction && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, flash_attn_ext_vec_q4_0_h96, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H96, flash_attn_ext_vec_q4_1_h96, has_simdgroup_reduction); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H96, flash_attn_ext_vec_q5_0_h96, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H96, flash_attn_ext_vec_q5_1_h96, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H96, flash_attn_ext_vec_q8_0_h96, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, flash_attn_ext_vec_f16_h128, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H128, flash_attn_ext_vec_bf16_h128, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H128, flash_attn_ext_vec_q4_0_h128, has_simdgroup_reduction); @@ -3843,7 +3857,7 @@ static void ggml_metal_encode_node( // TODO: add vec kernels for (ne00%64 == 0) and maybe also for (ne00%32 == 0) // for now avoiding mainly to keep the number of templates/kernels a bit lower // these are now trivial to add after: https://github.com/ggml-org/llama.cpp/pull/12612 - if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 192)) { + if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 96 && ne00 != 192)) { switch (src1->type) { case GGML_TYPE_F16: { @@ -4010,6 +4024,24 @@ static void ggml_metal_encode_node( use_vec_kernel = true; switch (ne00) { + case 96: + { + switch (src1->type) { + case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96].pipeline; break; + case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96].pipeline; break; + case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H96].pipeline; break; + case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H96].pipeline; break; + case 
GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H96].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H96].pipeline; break; + default: + { + GGML_LOG_ERROR("unsupported type: %d\n", src1->type); + GGML_LOG_ERROR("add template specialization for this type\n"); + GGML_ABORT("add template specialization for this type"); + } + } + } break; case 128: { switch (src1->type) { diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index b08666e27991f..0fb28238a8adb 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -3959,6 +3959,16 @@ kernel void kernel_flash_attn_ext_vec( typedef decltype(kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 128, 128, 4>) flash_attn_ext_vec_t; +template [[host_name("kernel_flash_attn_ext_vec_f16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 96, 96, 4>; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_flash_attn_ext_vec_bf16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 96, 96, 4>; +#endif +template [[host_name("kernel_flash_attn_ext_vec_q4_0_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 96, 96, 4>; +template [[host_name("kernel_flash_attn_ext_vec_q4_1_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 96, 96, 4>; +template [[host_name("kernel_flash_attn_ext_vec_q5_0_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 96, 96, 4>; +template 
[[host_name("kernel_flash_attn_ext_vec_q5_1_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 96, 96, 4>; +template [[host_name("kernel_flash_attn_ext_vec_q8_0_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 96, 96, 4>; + template [[host_name("kernel_flash_attn_ext_vec_f16_h128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 128, 128, 4>; #if defined(GGML_METAL_USE_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_h128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 128, 128, 4>;
diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 9f1c6c6ccc09f..16ca08383c7ed 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -402,6 +402,13 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H96, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H128, @@ -1059,6 +1066,13 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H192, flash_attn_ext_q8_0_h192, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, flash_attn_ext_q8_0_hk192_hv128, has_simdgroup_mm); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, flash_attn_ext_q8_0_h256, has_simdgroup_mm); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, flash_attn_ext_vec_f16_h96, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, flash_attn_ext_vec_bf16_h96, has_simdgroup_reduction && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, flash_attn_ext_vec_q4_0_h96, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H96, flash_attn_ext_vec_q5_0_h96, has_simdgroup_reduction); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H96, flash_attn_ext_vec_q5_1_h96, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H96, flash_attn_ext_vec_q8_0_h96, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, flash_attn_ext_vec_f16_h128, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H128, flash_attn_ext_vec_bf16_h128, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H128, flash_attn_ext_vec_q4_0_h128, has_simdgroup_reduction); @@ -3843,7 +3857,7 @@ static void ggml_metal_encode_node( // TODO: add vec kernels for (ne00%64 == 0) and maybe also for (ne00%32 == 0) // for now avoiding mainly to keep the number of templates/kernels a bit lower // these are now trivial to add after: https://github.com/ggml-org/llama.cpp/pull/12612 - if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 192)) { + if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 96 && ne00 != 192)) { switch (src1->type) { case GGML_TYPE_F16: @@ -4010,6 +4024,24 @@ static void ggml_metal_encode_node( use_vec_kernel = true; switch (ne00) { + case 96: + { + switch (src1->type) { + case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96].pipeline; break; + case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H96].pipeline; break; + case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H96].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H96].pipeline; break; + default: + { + GGML_LOG_ERROR("add template specialization for this type\n"); + GGML_ABORT("add template specialization for this type"); + } + } + } 
break; case 128: switch (src1->type) { diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index b08666e27991f..0fb28238a8adb 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -3959,6 +3959,16 @@ kernel void kernel_flash_attn_ext_vec( typedef decltype(kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 128, 128, 4>) flash_attn_ext_vec_t; +template [[host_name("kernel_flash_attn_ext_vec_f16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 96, 96, 4>; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_flash_attn_ext_vec_bf16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 96, 96, 4>; +#endif +template [[host_name("kernel_flash_attn_ext_vec_q4_0_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 96, 96, 4>; +template [[host_name("kernel_flash_attn_ext_vec_q5_0_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 96, 96, 4>; +template [[host_name("kernel_flash_attn_ext_vec_q8_0_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 96, 96, 4>; + template [[host_name("kernel_flash_attn_ext_vec_f16_h128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 128, 128, 4>; #if defined(GGML_METAL_USE_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_h128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 128, 128, 4>;
[ "+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H96, flash_attn_ext_vec_q4_1_h96, has_simdgroup_reduction);", "+ case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96].pipeline; break;", "+ case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H96].pipeline; break;", "+ GGML_LOG_ERROR(\"unsupported type: %d\\n\", src1->type);", "+template [[host_name(\"kernel_flash_attn_ext_vec_q4_1_h96\")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 96, 96, 4>;", "+template [[host_name(\"kernel_flash_attn_ext_vec_q5_1_h96\")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 96, 96, 4>;" ]
[ 25, 49, 52, 57, 79, 81 ]
{ "additions": 43, "author": "ggerganov", "deletions": 1, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12952", "issue_id": 12952, "merged_at": "2025-04-15T11:45:06Z", "omission_probability": 0.1, "pr_number": 12952, "repo": "ggml-org/llama.cpp", "title": "metal : add FA-vec kernels for head size 96", "total_changes": 44 }
818
diff --git a/Makefile b/Makefile index 1f9455eff0aec..772993ada2707 100644 --- a/Makefile +++ b/Makefile @@ -780,10 +780,6 @@ ifdef GGML_HIP MK_CPPFLAGS += -DGGML_USE_HIP -DGGML_USE_CUDA -ifdef GGML_HIP_UMA - MK_CPPFLAGS += -DGGML_HIP_UMA -endif # GGML_HIP_UMA - MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib MK_LDFLAGS += -L$(ROCM_PATH)/lib64 -Wl,-rpath=$(ROCM_PATH)/lib64 MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas diff --git a/docs/build.md b/docs/build.md index 3f1b043992545..c9027c0b580a5 100644 --- a/docs/build.md +++ b/docs/build.md @@ -259,8 +259,6 @@ You can download it from your Linux distro's package manager or from here: [ROCm cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ && cmake --build build --config Release -- -j 16 ``` - On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`. - However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). To enhance flash attention performance on RDNA3+ or CDNA architectures, you can utilize the rocWMMA library by enabling the `-DGGML_HIP_ROCWMMA_FATTN=ON` option. This requires rocWMMA headers to be installed on the build system. @@ -296,6 +294,10 @@ You can download it from your Linux distro's package manager or from here: [ROCm The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used. If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3. 
+### Unified Memory + +On Linux it is possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1`. However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). + ## Vulkan **Windows** diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index d33f843b417cf..438c2a7309191 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -170,7 +170,6 @@ option(GGML_HIP "ggml: use HIP" option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON) option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF) -option(GGML_HIP_UMA "ggml: use HIP unified memory architecture" OFF) option(GGML_VULKAN "ggml: use Vulkan" OFF) option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF) option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 4af1897017567..9ced466512788 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -96,31 +96,32 @@ int ggml_cuda_get_device() { static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) { ggml_cuda_set_device(device); -#if defined(GGML_USE_HIP) && defined(GGML_HIP_UMA) - auto res = hipMallocManaged(ptr, size); - if (res == hipSuccess) { - // if error we "need" to know why... - CUDA_CHECK(hipMemAdvise(*ptr, size, hipMemAdviseSetCoarseGrain, device)); - } - return res; -#else - -#if !defined(GGML_USE_HIP) cudaError_t err; if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr) { err = cudaMallocManaged(ptr, size); +#if defined(GGML_USE_HIP) + if (err == hipSuccess) { + CUDA_CHECK(cudaMemAdvise(*ptr, size, hipMemAdviseSetCoarseGrain, device)); + } + + // fall back to cudaMalloc if not supported (e.g. 
on Windows) + if (err == hipErrorNotSupported) { + static bool warned_unsupported = false; + if (!warned_unsupported) { + GGML_LOG_WARN("hipMallocManaged unsupported, falling back to hipMalloc.\n"); + warned_unsupported = true; + } + + err = cudaMalloc(ptr, size); + } +#endif // defined(GGML_USE_HIP) } else { err = cudaMalloc(ptr, size); } return err; -#else - return cudaMalloc(ptr, size); -#endif // !defined(GGML_USE_HIP) - -#endif } #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index 420b41b8d652d..1a28831b7a96b 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -71,6 +71,8 @@ #define cudaLaunchHostFunc hipLaunchHostFunc #define cudaMalloc hipMalloc #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) +#define cudaMallocManaged hipMallocManaged +#define cudaMemAdvise hipMemAdvise #define cudaMemcpy hipMemcpy #define cudaMemcpyAsync hipMemcpyAsync #define cudaMemcpyPeerAsync hipMemcpyPeerAsync diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index e3762649fd275..1fe8fe3b8d079 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -89,10 +89,6 @@ endif() add_compile_definitions(GGML_USE_HIP) -if (GGML_HIP_UMA) - add_compile_definitions(GGML_HIP_UMA) -endif() - if (GGML_CUDA_FORCE_MMQ) add_compile_definitions(GGML_CUDA_FORCE_MMQ) endif()
diff --git a/Makefile b/Makefile index 1f9455eff0aec..772993ada2707 100644 --- a/Makefile +++ b/Makefile @@ -780,10 +780,6 @@ ifdef GGML_HIP MK_CPPFLAGS += -DGGML_USE_HIP -DGGML_USE_CUDA -ifdef GGML_HIP_UMA - MK_CPPFLAGS += -DGGML_HIP_UMA -endif # GGML_HIP_UMA MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib MK_LDFLAGS += -L$(ROCM_PATH)/lib64 -Wl,-rpath=$(ROCM_PATH)/lib64 MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas diff --git a/docs/build.md b/docs/build.md index 3f1b043992545..c9027c0b580a5 100644 --- a/docs/build.md +++ b/docs/build.md @@ -259,8 +259,6 @@ You can download it from your Linux distro's package manager or from here: [ROCm cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ && cmake --build build --config Release -- -j 16 ``` - On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`. - However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). To enhance flash attention performance on RDNA3+ or CDNA architectures, you can utilize the rocWMMA library by enabling the `-DGGML_HIP_ROCWMMA_FATTN=ON` option. This requires rocWMMA headers to be installed on the build system. @@ -296,6 +294,10 @@ You can download it from your Linux distro's package manager or from here: [ROCm The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used. If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3. 
+### Unified Memory +On Linux it is possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1`. However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). ## Vulkan **Windows** diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index d33f843b417cf..438c2a7309191 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -170,7 +170,6 @@ option(GGML_HIP "ggml: use HIP" option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON) option(GGML_HIP_ROCWMMA_FATTN "ggml: enable rocWMMA for FlashAttention" OFF) -option(GGML_HIP_UMA "ggml: use HIP unified memory architecture" OFF) option(GGML_VULKAN "ggml: use Vulkan" OFF) option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF) option(GGML_VULKAN_DEBUG "ggml: enable Vulkan debug output" OFF) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 4af1897017567..9ced466512788 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -96,31 +96,32 @@ int ggml_cuda_get_device() { static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) { ggml_cuda_set_device(device); -#if defined(GGML_USE_HIP) && defined(GGML_HIP_UMA) - auto res = hipMallocManaged(ptr, size); - if (res == hipSuccess) { - // if error we "need" to know why... 
- CUDA_CHECK(hipMemAdvise(*ptr, size, hipMemAdviseSetCoarseGrain, device)); - } - return res; -#if !defined(GGML_USE_HIP) cudaError_t err; if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr) err = cudaMallocManaged(ptr, size); +#if defined(GGML_USE_HIP) + if (err == hipSuccess) { + CUDA_CHECK(cudaMemAdvise(*ptr, size, hipMemAdviseSetCoarseGrain, device)); + if (err == hipErrorNotSupported) { + static bool warned_unsupported = false; + if (!warned_unsupported) { + GGML_LOG_WARN("hipMallocManaged unsupported, falling back to hipMalloc.\n"); + warned_unsupported = true; + } + err = cudaMalloc(ptr, size); +#endif // defined(GGML_USE_HIP) else err = cudaMalloc(ptr, size); return err; - return cudaMalloc(ptr, size); -#endif // !defined(GGML_USE_HIP) -#endif } #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index 420b41b8d652d..1a28831b7a96b 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -71,6 +71,8 @@ #define cudaLaunchHostFunc hipLaunchHostFunc #define cudaMalloc hipMalloc #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault) +#define cudaMemAdvise hipMemAdvise #define cudaMemcpy hipMemcpy #define cudaMemcpyAsync hipMemcpyAsync #define cudaMemcpyPeerAsync hipMemcpyPeerAsync diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index e3762649fd275..1fe8fe3b8d079 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -89,10 +89,6 @@ endif() add_compile_definitions(GGML_USE_HIP) -if (GGML_HIP_UMA) - add_compile_definitions(GGML_HIP_UMA) -endif() if (GGML_CUDA_FORCE_MMQ) add_compile_definitions(GGML_CUDA_FORCE_MMQ) endif()
[ "+ // fall back to cudaMalloc if not supported (e.g. on Windows)", "+#define cudaMallocManaged hipMallocManaged" ]
[ 78, 111 ]
{ "additions": 22, "author": "hjc4869", "deletions": 26, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12934", "issue_id": 12934, "merged_at": "2025-04-15T09:20:38Z", "omission_probability": 0.1, "pr_number": 12934, "repo": "ggml-org/llama.cpp", "title": "CUDA/HIP: Share the same unified memory allocation logic.", "total_changes": 48 }
819
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index fc5eac151b90c..91fd8988dfd3a 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -153,6 +153,9 @@ option(GGML_CUDA_NO_PEER_COPY "ggml: do not use peer to peer copie option(GGML_CUDA_NO_VMM "ggml: do not try to use CUDA VMM" OFF) option(GGML_CUDA_FA_ALL_QUANTS "ggml: compile all quants for FlashAttention" OFF) option(GGML_CUDA_GRAPHS "ggml: use CUDA graphs (llama.cpp only)" ${GGML_CUDA_GRAPHS_DEFAULT}) +set (GGML_CUDA_COMPRESSION_MODE "size" CACHE STRING + "ggml: cuda link binary compression mode; requires cuda 12.8+") +set_property(CACHE GGML_CUDA_COMPRESSION_MODE PROPERTY STRINGS "none;speed;balance;size") option(GGML_HIP "ggml: use HIP" OFF) option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) diff --git a/ggml/src/ggml-cuda/CMakeLists.txt b/ggml/src/ggml-cuda/CMakeLists.txt index e63ede2fbe3ff..f5d7350e2c30c 100644 --- a/ggml/src/ggml-cuda/CMakeLists.txt +++ b/ggml/src/ggml-cuda/CMakeLists.txt @@ -98,6 +98,15 @@ if (CUDAToolkit_FOUND) set(CUDA_FLAGS -use_fast_math) + if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "12.8") + # Options are: + # - none (not recommended) + # - speed (nvcc's default) + # - balance + # - size + list(APPEND CUDA_FLAGS -compress-mode=${GGML_CUDA_COMPRESSION_MODE}) + endif() + if (GGML_FATAL_WARNINGS) list(APPEND CUDA_FLAGS -Werror all-warnings) endif()
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index fc5eac151b90c..91fd8988dfd3a 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -153,6 +153,9 @@ option(GGML_CUDA_NO_PEER_COPY "ggml: do not use peer to peer copie option(GGML_CUDA_NO_VMM "ggml: do not try to use CUDA VMM" OFF) option(GGML_CUDA_FA_ALL_QUANTS "ggml: compile all quants for FlashAttention" OFF) option(GGML_CUDA_GRAPHS "ggml: use CUDA graphs (llama.cpp only)" ${GGML_CUDA_GRAPHS_DEFAULT}) +set (GGML_CUDA_COMPRESSION_MODE "size" CACHE STRING + "ggml: cuda link binary compression mode; requires cuda 12.8+") +set_property(CACHE GGML_CUDA_COMPRESSION_MODE PROPERTY STRINGS "none;speed;balance;size") option(GGML_HIP "ggml: use HIP" OFF) option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) diff --git a/ggml/src/ggml-cuda/CMakeLists.txt b/ggml/src/ggml-cuda/CMakeLists.txt index e63ede2fbe3ff..f5d7350e2c30c 100644 --- a/ggml/src/ggml-cuda/CMakeLists.txt +++ b/ggml/src/ggml-cuda/CMakeLists.txt @@ -98,6 +98,15 @@ if (CUDAToolkit_FOUND) set(CUDA_FLAGS -use_fast_math) + if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "12.8") + # Options are: + # - none (not recommended) + # - speed (nvcc's default) + # - balance + # - size + list(APPEND CUDA_FLAGS -compress-mode=${GGML_CUDA_COMPRESSION_MODE}) + endif() + if (GGML_FATAL_WARNINGS) list(APPEND CUDA_FLAGS -Werror all-warnings) endif()
[]
[]
{ "additions": 12, "author": "Green-Sky", "deletions": 0, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12029", "issue_id": 12029, "merged_at": "2025-03-01T11:57:22Z", "omission_probability": 0.1, "pr_number": 12029, "repo": "ggml-org/llama.cpp", "title": "CUDA: compress-mode size", "total_changes": 12 }
820
diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 3e48a9244d339..09800d3403fe1 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4018,8 +4018,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return ggml_is_contiguous(op->src[0]); } case GGML_OP_IM2COL: - // TODO: add support for the new F32 operations - return op->src[0]->type == GGML_TYPE_F16; + return true; case GGML_OP_UPSCALE: return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST; case GGML_OP_POOL_2D: diff --git a/ggml/src/ggml-sycl/im2col.cpp b/ggml/src/ggml-sycl/im2col.cpp index 009b42035d026..aa19c2527dc41 100644 --- a/ggml/src/ggml-sycl/im2col.cpp +++ b/ggml/src/ggml-sycl/im2col.cpp @@ -12,110 +12,125 @@ #include "im2col.hpp" +#include <sycl/sycl.hpp> +#include <type_traits> // For std::is_same_v + +#include "ggml.h" + template <typename T> -static void im2col_kernel( - const float *x, T *dst, int64_t batch_offset, int64_t offset_delta, - int64_t IC, int64_t IW, int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, - int64_t pelements, int64_t CHW, int s0, int s1, int p0, int p1, int d0, int d1, - const sycl::nd_item<3> &item_ct1) { +static void im2col_kernel(const float * x, T * dst, int64_t batch_offset, int64_t offset_delta, int64_t IC, int64_t IW, + int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, int64_t pelements, int64_t CHW, + int s0, int s1, int p0, int p1, int d0, int d1, const sycl::nd_item<3> & item_ct1) { const int64_t work_group_size = item_ct1.get_local_range(2); - const int64_t global_id = item_ct1.get_local_id(2) + work_group_size * item_ct1.get_group(2); + const int64_t global_id = item_ct1.get_local_id(2) + (work_group_size * item_ct1.get_group(2)); // make each work-item deal with more elements since sycl global range can not exceed max int - for (int64_t i = global_id; i < pelements; i += work_group_size * 
item_ct1.get_group_range(2)) { - + for (int64_t i = global_id; i < pelements; i += (work_group_size * item_ct1.get_group_range(2))) { const int64_t ksize = OW * (KH > 1 ? KW : 1); - const int64_t kx = i / ksize; - const int64_t kd = kx * ksize; - const int64_t ky = (i - kd) / OW; - const int64_t ix = i % OW; - - const int64_t oh = item_ct1.get_group(1); - const int64_t batch = item_ct1.get_group(0) / IC; - const int64_t ic = item_ct1.get_group(0) % IC; - - const int64_t iiw = ix * s0 + kx * d0 - p0; - const int64_t iih = oh * s1 + ky * d1 - p1; - - const int64_t offset_dst = - ((batch * OH + oh) * OW + ix) * CHW + - (ic * (KW * KH) + ky * KW + kx); - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst[offset_dst] = - sycl::vec<float, 1>(0.0f) - .convert<sycl::half, sycl::rounding_mode::automatic>()[0]; - } else { - const int64_t offset_src = ic * offset_delta + batch * batch_offset; - dst[offset_dst] = - sycl::vec<float, 1>(x[offset_src + iih * IW + iiw]) - .convert<sycl::half, sycl::rounding_mode::automatic>()[0]; + const int64_t kx = i / ksize; + const int64_t kd = kx * ksize; + const int64_t ky = (i - kd) / OW; + const int64_t ix = i % OW; + + const int64_t oh = item_ct1.get_group(1); + const int64_t batch = item_ct1.get_group(0) / IC; + const int64_t ic = item_ct1.get_group(0) % IC; + + const int64_t iiw = (ix * s0) + (kx * d0) - p0; + const int64_t iih = (oh * s1) + (ky * d1) - p1; + + const int64_t offset_dst = (((batch * OH + oh) * OW + ix) * CHW) + (ic * (KW * KH) + ky * KW + kx); + + const int64_t offset_src_base = (ic * offset_delta) + (batch * batch_offset); + const int64_t offset_src = offset_src_base + (iih * IW) + iiw; + + const bool out_of_bounds = (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW); + const float src_val = out_of_bounds ? 
0.0f : x[offset_src]; + + if constexpr (std::is_same_v<T, sycl::half>) { + dst[offset_dst] = sycl::half(src_val); + } else if constexpr (std::is_same_v<T, float>) { + dst[offset_dst] = src_val; } } } template <typename T> -static void im2col_sycl( - const float *x, T *dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, - int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta, - int s0, int s1, int p0, int p1, int d0, int d1, - queue_ptr stream) { +static void im2col_sycl_internal(const float * x, T * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, + int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta, + int s0, int s1, int p0, int p1, int d0, int d1, queue_ptr stream) { const int64_t parallel_elements = OW * KW * KH; - const int64_t num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE; + const int64_t num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE; // decrease global range when it exceeds the max int int64_t local_size = downsample_sycl_global_range(batch * IC * OH * num_blocks, SYCL_IM2COL_BLOCK_SIZE); + sycl::range<3> block_nums(batch * IC, OH, num_blocks); sycl::range<3> local_range(1, 1, local_size); - { - dpct::has_capability_or_fail(stream->get_device(), - {sycl::aspect::fp16}); - - stream->parallel_for( - sycl::nd_range<3>(block_nums * local_range, local_range), - [=](sycl::nd_item<3> item_ct1) { - im2col_kernel(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, - parallel_elements, (IC * KH * KW), s0, s1, p0, - p1, d0, d1, item_ct1); - }); + const int64_t CHW = IC * KH * KW; + + stream->parallel_for(sycl::nd_range<3>(block_nums * local_range, local_range), [=](sycl::nd_item<3> item_ct1) { + im2col_kernel<T>(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, parallel_elements, CHW, s0, s1, + p0, p1, d0, d1, item_ct1); + }); +} + +static void im2col_sycl_f16(const float * x, 
sycl::half * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, + int64_t KW, int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, + int64_t offset_delta, int s0, int s1, int p0, int p1, int d0, int d1, queue_ptr stream) { + if (!stream->get_device().has(sycl::aspect::fp16)) { + throw sycl::exception(sycl::make_error_code(sycl::errc::kernel_not_supported), + "Device does not support half precision (fp16) operations!"); } + im2col_sycl_internal<sycl::half>(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, + p1, d0, d1, stream); +} + +static void im2col_sycl_f32(const float * x, float * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, + int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta, int s0, + int s1, int p0, int p1, int d0, int d1, queue_ptr stream) { + im2col_sycl_internal<float>(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, p1, + d0, d1, stream); } -void ggml_sycl_op_im2col(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { +void ggml_sycl_op_im2col(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; - GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; - const int32_t p0 = ((const int32_t*)(dst->op_params))[2]; - const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; - const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; - const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + 
const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; const int64_t IC = src1->ne[is_2D ? 2 : 1]; const int64_t IH = is_2D ? src1->ne[1] : 1; - const int64_t IW = src1->ne[0]; + const int64_t IW = src1->ne[0]; const int64_t KH = is_2D ? src0->ne[1] : 1; - const int64_t KW = src0->ne[0]; + const int64_t KW = src0->ne[0]; const int64_t OH = is_2D ? dst->ne[2] : 1; - const int64_t OW = dst->ne[1]; + const int64_t OW = dst->ne[1]; + + const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / sizeof(float); + const int64_t batch = src1->ne[is_2D ? 3 : 2]; + const size_t batch_offset = src1->nb[is_2D ? 3 : 2] / sizeof(float); - const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32 - const int64_t batch = src1->ne[3]; - const size_t batch_offset = src1->nb[3] / 4; // nb is byte offset, src is type float32 + queue_ptr stream = ctx.stream(); if (dst->type == GGML_TYPE_F16) { - im2col_sycl((const float *) src1->data, (sycl::half *)dst->data, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, ctx.stream()); + im2col_sycl_f16((const float *) src1->data, (sycl::half *) dst->data, IW, IH, OW, OH, KW, KH, IC, batch, + batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream); } else { - im2col_sycl((const float *) src1->data, (float *)dst->data, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, ctx.stream()); + im2col_sycl_f32((const float *) src1->data, (float *) dst->data, IW, IH, OW, OH, KW, KH, IC, batch, + batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream); } }
diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 3e48a9244d339..09800d3403fe1 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4018,8 +4018,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g return ggml_is_contiguous(op->src[0]); } case GGML_OP_IM2COL: - // TODO: add support for the new F32 operations - return op->src[0]->type == GGML_TYPE_F16; case GGML_OP_UPSCALE: return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST; case GGML_OP_POOL_2D: diff --git a/ggml/src/ggml-sycl/im2col.cpp b/ggml/src/ggml-sycl/im2col.cpp index 009b42035d026..aa19c2527dc41 100644 --- a/ggml/src/ggml-sycl/im2col.cpp +++ b/ggml/src/ggml-sycl/im2col.cpp @@ -12,110 +12,125 @@ #include "im2col.hpp" +#include <sycl/sycl.hpp> +#include <type_traits> // For std::is_same_v +#include "ggml.h" -static void im2col_kernel( - const float *x, T *dst, int64_t batch_offset, int64_t offset_delta, - int64_t IC, int64_t IW, int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, - int64_t pelements, int64_t CHW, int s0, int s1, int p0, int p1, int d0, int d1, - const sycl::nd_item<3> &item_ct1) { + int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, int64_t pelements, int64_t CHW, const int64_t work_group_size = item_ct1.get_local_range(2); - const int64_t global_id = item_ct1.get_local_id(2) + work_group_size * item_ct1.get_group(2); + const int64_t global_id = item_ct1.get_local_id(2) + (work_group_size * item_ct1.get_group(2)); // make each work-item deal with more elements since sycl global range can not exceed max int - for (int64_t i = global_id; i < pelements; i += work_group_size * item_ct1.get_group_range(2)) { + for (int64_t i = global_id; i < pelements; i += (work_group_size * item_ct1.get_group_range(2))) { const int64_t ksize = OW * (KH > 1 ? 
KW : 1); - const int64_t kx = i / ksize; - const int64_t ky = (i - kd) / OW; - const int64_t ix = i % OW; - const int64_t oh = item_ct1.get_group(1); - const int64_t batch = item_ct1.get_group(0) / IC; - const int64_t iiw = ix * s0 + kx * d0 - p0; - const int64_t iih = oh * s1 + ky * d1 - p1; - const int64_t offset_dst = - ((batch * OH + oh) * OW + ix) * CHW + - (ic * (KW * KH) + ky * KW + kx); - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - sycl::vec<float, 1>(0.0f) - } else { - const int64_t offset_src = ic * offset_delta + batch * batch_offset; - sycl::vec<float, 1>(x[offset_src + iih * IW + iiw]) + const int64_t kx = i / ksize; + const int64_t kd = kx * ksize; + const int64_t ky = (i - kd) / OW; + const int64_t ix = i % OW; + const int64_t oh = item_ct1.get_group(1); + const int64_t batch = item_ct1.get_group(0) / IC; + const int64_t ic = item_ct1.get_group(0) % IC; + const int64_t iiw = (ix * s0) + (kx * d0) - p0; + const int64_t iih = (oh * s1) + (ky * d1) - p1; + const int64_t offset_src_base = (ic * offset_delta) + (batch * batch_offset); + const int64_t offset_src = offset_src_base + (iih * IW) + iiw; + const bool out_of_bounds = (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW); + const float src_val = out_of_bounds ? 
0.0f : x[offset_src]; + if constexpr (std::is_same_v<T, sycl::half>) { + dst[offset_dst] = sycl::half(src_val); + } else if constexpr (std::is_same_v<T, float>) { + dst[offset_dst] = src_val; } -static void im2col_sycl( - queue_ptr stream) { +static void im2col_sycl_internal(const float * x, T * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, + int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta, + int s0, int s1, int p0, int p1, int d0, int d1, queue_ptr stream) { const int64_t parallel_elements = OW * KW * KH; - const int64_t num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE; + const int64_t num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE; // decrease global range when it exceeds the max int int64_t local_size = downsample_sycl_global_range(batch * IC * OH * num_blocks, SYCL_IM2COL_BLOCK_SIZE); sycl::range<3> block_nums(batch * IC, OH, num_blocks); sycl::range<3> local_range(1, 1, local_size); - { - dpct::has_capability_or_fail(stream->get_device(), - {sycl::aspect::fp16}); - stream->parallel_for( - sycl::nd_range<3>(block_nums * local_range, local_range), - [=](sycl::nd_item<3> item_ct1) { - im2col_kernel(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, - parallel_elements, (IC * KH * KW), s0, s1, p0, - p1, d0, d1, item_ct1); - }); + const int64_t CHW = IC * KH * KW; + stream->parallel_for(sycl::nd_range<3>(block_nums * local_range, local_range), [=](sycl::nd_item<3> item_ct1) { + }); +static void im2col_sycl_f16(const float * x, sycl::half * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, + int64_t KW, int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, + int64_t offset_delta, int s0, int s1, int p0, int p1, int d0, int d1, queue_ptr stream) { + if (!stream->get_device().has(sycl::aspect::fp16)) { + throw sycl::exception(sycl::make_error_code(sycl::errc::kernel_not_supported), + 
im2col_sycl_internal<sycl::half>(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, + p1, d0, d1, stream); +static void im2col_sycl_f32(const float * x, float * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, + int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta, int s0, + int s1, int p0, int p1, int d0, int d1, queue_ptr stream) { + im2col_sycl_internal<float>(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, p1, + d0, d1, stream); -void ggml_sycl_op_im2col(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { +void ggml_sycl_op_im2col(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; - GGML_ASSERT(src0->type == GGML_TYPE_F16); GGML_ASSERT(src1->type == GGML_TYPE_F32); GGML_ASSERT(dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); - const int32_t s0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t s1 = ((const int32_t*)(dst->op_params))[1]; - const int32_t p1 = ((const int32_t*)(dst->op_params))[3]; - const int32_t d0 = ((const int32_t*)(dst->op_params))[4]; - const int32_t d1 = ((const int32_t*)(dst->op_params))[5]; + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; const int64_t IC = src1->ne[is_2D ? 2 : 1]; const int64_t IH = is_2D ? src1->ne[1] : 1; - const int64_t IW = src1->ne[0]; + const int64_t IW = src1->ne[0]; const int64_t KH = is_2D ? src0->ne[1] : 1; - const int64_t KW = src0->ne[0]; + const int64_t KW = src0->ne[0]; const int64_t OH = is_2D ? 
dst->ne[2] : 1; - const int64_t OW = dst->ne[1]; + const int64_t OW = dst->ne[1]; + const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / sizeof(float); + const int64_t batch = src1->ne[is_2D ? 3 : 2]; + const size_t batch_offset = src1->nb[is_2D ? 3 : 2] / sizeof(float); - const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32 - const size_t batch_offset = src1->nb[3] / 4; // nb is byte offset, src is type float32 + queue_ptr stream = ctx.stream(); if (dst->type == GGML_TYPE_F16) { - im2col_sycl((const float *) src1->data, (sycl::half *)dst->data, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, ctx.stream()); } else { - im2col_sycl((const float *) src1->data, (float *)dst->data, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, ctx.stream()); + im2col_sycl_f32((const float *) src1->data, (float *) dst->data, IW, IH, OW, OH, KW, KH, IC, batch,
[ "+ return true;", "+static void im2col_kernel(const float * x, T * dst, int64_t batch_offset, int64_t offset_delta, int64_t IC, int64_t IW,", "+ int s0, int s1, int p0, int p1, int d0, int d1, const sycl::nd_item<3> & item_ct1) {", "- const int64_t kd = kx * ksize;", "- const int64_t ic = item_ct1.get_group(0) % IC;", "+ const int64_t offset_dst = (((batch * OH + oh) * OW + ix) * CHW) + (ic * (KW * KH) + ky * KW + kx);", "- const float *x, T *dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW,", "- int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta,", "- int s0, int s1, int p0, int p1, int d0, int d1,", "+ im2col_kernel<T>(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, parallel_elements, CHW, s0, s1,", "+ p0, p1, d0, d1, item_ct1);", "+ \"Device does not support half precision (fp16) operations!\");", "- const int32_t p0 = ((const int32_t*)(dst->op_params))[2];", "- const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1;", "+ const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1;", "- const int64_t batch = src1->ne[3];", "+ im2col_sycl_f16((const float *) src1->data, (sycl::half *) dst->data, IW, IH, OW, OH, KW, KH, IC, batch," ]
[ 10, 33, 35, 46, 52, 82, 100, 101, 102, 131, 132, 141, 165, 176, 177, 197, 203 ]
{ "additions": 83, "author": "qnixsynapse", "deletions": 69, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12910", "issue_id": 12910, "merged_at": "2025-04-14T12:23:53Z", "omission_probability": 0.1, "pr_number": 12910, "repo": "ggml-org/llama.cpp", "title": "SYCL: Fix im2col", "total_changes": 152 }
821
diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp index 862b9b666175d..3189ae85d55f9 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp @@ -1,6 +1,7 @@ #include "ggml-rpc.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" +#include "ggml-cpp.h" #include <cinttypes> #include <string> @@ -853,12 +854,13 @@ bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_ /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); + ggml_context_ptr ctx_ptr { ggml_init(params) }; + GGML_ASSERT(ctx_ptr != nullptr); + ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor); if (tensor == nullptr) { GGML_LOG_ERROR("Null tensor pointer passed to server get_alloc_size function.\n"); - ggml_free(ctx); return false; } @@ -871,7 +873,6 @@ bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_ response.alloc_size = ggml_backend_buft_get_alloc_size(buft,tensor); - ggml_free(ctx); return true; } @@ -985,11 +986,12 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) { /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); + ggml_context_ptr ctx_ptr { ggml_init(params) }; + GGML_ASSERT(ctx_ptr != nullptr); + ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor); if (tensor == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__); - ggml_free(ctx); return false; } GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu\n", __func__, (void*)tensor->buffer, tensor->data, offset, size); @@ -1016,7 +1018,6 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) { printf("[%s] saved to '%s'\n", __func__, cache_file.c_str()); } ggml_backend_tensor_set(tensor, data, offset, size); - ggml_free(ctx); return true; } @@ -1060,11 +1061,12 @@ bool rpc_server::set_tensor_hash(const 
std::vector<uint8_t> & input, rpc_msg_set /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); + ggml_context_ptr ctx_ptr { ggml_init(params) }; + GGML_ASSERT(ctx_ptr != nullptr); + ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor); if (tensor == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__); - ggml_free(ctx); return false; } GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu, hash: %" PRIx64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size, *hash); @@ -1080,7 +1082,6 @@ bool rpc_server::set_tensor_hash(const std::vector<uint8_t> & input, rpc_msg_set } ggml_backend_tensor_set(tensor, cached_file.data(), offset, size); response.result = 1; - ggml_free(ctx); return true; } @@ -1090,11 +1091,12 @@ bool rpc_server::init_tensor(const rpc_msg_init_tensor_req & request) { /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); + ggml_context_ptr ctx_ptr { ggml_init(params) }; + GGML_ASSERT(ctx_ptr != nullptr); + ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor); if (tensor == nullptr) { GGML_LOG_ERROR("Null tensor pointer passed to server init_tensor function.\n"); - ggml_free(ctx); return false; } @@ -1110,11 +1112,9 @@ bool rpc_server::init_tensor(const rpc_msg_init_tensor_req & request) { // This pointer can either be passed around client/server, or probably better stored server-side and kept track of. // Currently unimplemented. 
GGML_LOG_ERROR("tensor->extra populated by the backend, this is currently unsupported.\n"); - ggml_free(ctx); return false; } - ggml_free(ctx); return true; } @@ -1124,11 +1124,12 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector< /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); + ggml_context_ptr ctx_ptr { ggml_init(params) }; + GGML_ASSERT(ctx_ptr != nullptr); + ggml_context * ctx = ctx_ptr.get(); ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor); if (tensor == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__); - ggml_free(ctx); return false; } GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, request.offset, request.size); @@ -1147,7 +1148,6 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector< response.resize(request.size, 0); ggml_backend_tensor_get(tensor, response.data(), request.offset, request.size); - ggml_free(ctx); return true; } @@ -1157,12 +1157,14 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); + ggml_context_ptr ctx_ptr { ggml_init(params) }; + GGML_ASSERT(ctx_ptr != nullptr); + ggml_context * ctx = ctx_ptr.get(); + ggml_tensor * src = deserialize_tensor(ctx, &request.src); ggml_tensor * dst = deserialize_tensor(ctx, &request.dst); if (src == nullptr || dst == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__); - ggml_free(ctx); return false; } @@ -1180,7 +1182,6 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co dst_data + src_size, dst_base, dst_base + dst_buf_sz); - ggml_free(ctx); return false; } @@ -1188,7 +1189,6 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co __func__, (void*) src->buffer, 
(void*) dst->buffer); response.result = ggml_backend_buffer_copy_tensor(src, dst); - ggml_free(ctx); return true; } @@ -1242,7 +1242,9 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); + ggml_context_ptr ctx_ptr { ggml_init(params) }; + GGML_ASSERT(ctx_ptr != nullptr); + ggml_context * ctx = ctx_ptr.get(); struct ggml_cgraph * graph = ggml_new_graph_custom(ctx, n_nodes, false); graph->n_nodes = n_nodes; std::unordered_map<uint64_t, const rpc_tensor*> tensor_ptrs; @@ -1257,7 +1259,6 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph } ggml_status status = ggml_backend_graph_compute(backend, graph); response.result = status; - ggml_free(ctx); return true; }
diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp index 862b9b666175d..3189ae85d55f9 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp @@ -1,6 +1,7 @@ #include "ggml-rpc.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" +#include "ggml-cpp.h" #include <cinttypes> #include <string> @@ -853,12 +854,13 @@ bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_ GGML_LOG_ERROR("Null tensor pointer passed to server get_alloc_size function.\n"); @@ -871,7 +873,6 @@ bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_ response.alloc_size = ggml_backend_buft_get_alloc_size(buft,tensor); @@ -985,11 +986,12 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) { GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu\n", __func__, (void*)tensor->buffer, tensor->data, offset, size); @@ -1016,7 +1018,6 @@ bool rpc_server::set_tensor(const std::vector<uint8_t> & input) { printf("[%s] saved to '%s'\n", __func__, cache_file.c_str()); ggml_backend_tensor_set(tensor, data, offset, size); @@ -1060,11 +1061,12 @@ bool rpc_server::set_tensor_hash(const std::vector<uint8_t> & input, rpc_msg_set GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu, hash: %" PRIx64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size, *hash); @@ -1080,7 +1082,6 @@ bool rpc_server::set_tensor_hash(const std::vector<uint8_t> & input, rpc_msg_set ggml_backend_tensor_set(tensor, cached_file.data(), offset, size); response.result = 1; @@ -1090,11 +1091,12 @@ bool rpc_server::init_tensor(const rpc_msg_init_tensor_req & request) { GGML_LOG_ERROR("Null tensor pointer passed to server init_tensor function.\n"); @@ -1110,11 +1112,9 @@ bool rpc_server::init_tensor(const rpc_msg_init_tensor_req & request) { // This pointer can either be passed around client/server, or probably better stored server-side and kept track of. 
// Currently unimplemented. GGML_LOG_ERROR("tensor->extra populated by the backend, this is currently unsupported.\n"); @@ -1124,11 +1124,12 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector< GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, request.offset, request.size); @@ -1147,7 +1148,6 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector< response.resize(request.size, 0); ggml_backend_tensor_get(tensor, response.data(), request.offset, request.size); @@ -1157,12 +1157,14 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co + ggml_tensor * src = deserialize_tensor(ctx, &request.src); ggml_tensor * dst = deserialize_tensor(ctx, &request.dst); if (src == nullptr || dst == nullptr) { GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__); @@ -1180,7 +1182,6 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co dst_data + src_size, dst_base, dst_base + dst_buf_sz); @@ -1188,7 +1189,6 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co __func__, (void*) src->buffer, (void*) dst->buffer); response.result = ggml_backend_buffer_copy_tensor(src, dst); @@ -1242,7 +1242,9 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph struct ggml_cgraph * graph = ggml_new_graph_custom(ctx, n_nodes, false); graph->n_nodes = n_nodes; std::unordered_map<uint64_t, const rpc_tensor*> tensor_ptrs; @@ -1257,7 +1259,6 @@ bool rpc_server::graph_compute(const std::vector<uint8_t> & input, rpc_msg_graph ggml_status status = ggml_backend_graph_compute(backend, graph); response.result = status;
[]
[]
{ "additions": 23, "author": "rgerganov", "deletions": 22, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12938", "issue_id": 12938, "merged_at": "2025-04-14T10:59:34Z", "omission_probability": 0.1, "pr_number": 12938, "repo": "ggml-org/llama.cpp", "title": "rpc : use ggml_context_ptr", "total_changes": 45 }
822
diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp index 74a31abb2d6fc..ced09c05c2ccb 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp @@ -183,67 +183,63 @@ static inline __m256 __avx_rearranged_f32cx8_load(ggml_fp16_t *x, __m128i arrang #if defined(__AVX2__) || defined(__AVX512F__) #if defined(__AVX512F__) -// add int16_t pairwise and return as 512 bit int vector -static inline __m512i sum_i16_pairs_int_32x16(const __m512i x) { +// add int16_t pairwise and return as 512 bit int vector, then add the accumulator +static inline __m512i sum_i16_pairs_acc_int32x16(const __m512i acc, const __m512i x) { const __m512i ones = _mm512_set1_epi16(1); - return _mm512_madd_epi16(ones, x); + return _mm512_add_epi32(acc, _mm512_madd_epi16(ones, x)); } -static inline __m512i mul_sum_us8_pairs_int32x16(const __m512i ax, const __m512i sy) { +static inline __m512i mul_sum_us8_pairs_acc_int32x16(const __m512i acc, const __m512i ax, const __m512i sy) { #if defined(__AVX512VNNI__) - const __m512i zero = _mm512_setzero_si512(); - return _mm512_dpbusd_epi32(zero, ax, sy); + return _mm512_dpbusd_epi32(acc, ax, sy); #else // Perform multiplication and create 16-bit values const __m512i dot = _mm512_maddubs_epi16(ax, sy); - return sum_i16_pairs_int_32x16(dot); + return sum_i16_pairs_acc_int32x16(acc, dot); #endif } -// multiply int8_t, add results pairwise twice and return as 512 bit int vector -static inline __m512i mul_sum_i8_pairs_int32x16(const __m512i x, const __m512i y) { +// multiply int8_t, add results pairwise twice and return as 512 bit int vector,then add the accumulator +static inline __m512i mul_sum_i8_pairs_acc_int32x16(const __m512i acc, const __m512i x, const __m512i y) { const __m512i zero = _mm512_setzero_si512(); // Get absolute values of x vectors const __m512i ax = _mm512_abs_epi8(x); // Sign the values of the y vectors __mmask64 blt0 = _mm512_movepi8_mask(x); const __m512i sy = 
_mm512_mask_sub_epi8(y, blt0, zero, y); - return mul_sum_us8_pairs_int32x16(ax, sy); + return mul_sum_us8_pairs_acc_int32x16(acc, ax, sy); } #endif -// add int16_t pairwise and return as 256 bit int vector -static inline __m256i sum_i16_pairs_int32x8(const __m256i x) { +// add int16_t pairwise and return as 256 bit int vector, then add the accumulator +static inline __m256i sum_i16_pairs_acc_int32x8(const __m256i acc, const __m256i x) { const __m256i ones = _mm256_set1_epi16(1); - return _mm256_madd_epi16(ones, x); + return _mm256_add_epi32(acc, _mm256_madd_epi16(ones, x)); } -static inline __m256i mul_sum_us8_pairs_int32x8(const __m256i ax, const __m256i sy) { +static inline __m256i mul_sum_us8_pairs_acc_int32x8(const __m256i acc, const __m256i ax, const __m256i sy) { #if defined(__AVX512VNNI__) && defined(__AVX512VL__) - const __m256i zero = _mm256_setzero_si256(); - return _mm256_dpbusd_epi32(zero, ax, sy); + return _mm256_dpbusd_epi32(acc, ax, sy); #elif defined(__AVXVNNI__) - const __m256i zero = _mm256_setzero_si256(); - return _mm256_dpbusd_avx_epi32(zero, ax, sy); + return _mm256_dpbusd_avx_epi32(acc, ax, sy); #else // Perform multiplication and create 16-bit values const __m256i dot = _mm256_maddubs_epi16(ax, sy); - return sum_i16_pairs_int32x8(dot); + return sum_i16_pairs_acc_int32x8(acc, dot); #endif } // Integer variant of the function defined in ggml-quants.c -// multiply int8_t, add results pairwise twice and return as 256 bit int vector -static inline __m256i mul_sum_i8_pairs_int32x8(const __m256i x, const __m256i y) { -#if __AVXVNNIINT8__ - const __m256i zero = _mm256_setzero_si256(); - return _mm256_dpbssd_epi32(zero, x, y); +// multiply int8_t, add results pairwise twice and return as 256 bit int vector, then add the accumulator +static inline __m256i mul_sum_i8_pairs_acc_int32x8(const __m256i acc, const __m256i x, const __m256i y) { +#if defined(__AVXVNNIINT8__) + return _mm256_dpbssd_epi32(acc, x, y); #else // Get absolute values of x vectors 
const __m256i ax = _mm256_sign_epi8(x, x); // Sign the values of the y vectors const __m256i sy = _mm256_sign_epi8(y, x); - return mul_sum_us8_pairs_int32x8(ax, sy); + return mul_sum_us8_pairs_acc_int32x8(acc, ax, sy); #endif } #endif @@ -1175,17 +1171,17 @@ static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c // ........................................................................... // B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31) - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_0 ,_mm256_shuffle_epi32(rhs_vec_4567_0, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0))); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_0, 177) ,rhs_vec_4567_0, 170), _mm256_shuffle_epi32(lhs_vec_0, 85))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_0 ,_mm256_shuffle_epi32(rhs_vec_4567_0, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_0, 177) ,rhs_vec_4567_0, 170), _mm256_shuffle_epi32(lhs_vec_0, 85)); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_1 ,_mm256_shuffle_epi32(rhs_vec_4567_1, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170))); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_1, 177) ,rhs_vec_4567_1, 170), _mm256_shuffle_epi32(lhs_vec_0, 255))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_1 ,_mm256_shuffle_epi32(rhs_vec_4567_1, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_1, 177) ,rhs_vec_4567_1, 170), _mm256_shuffle_epi32(lhs_vec_0, 255)); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_2 
,_mm256_shuffle_epi32(rhs_vec_4567_2, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0))); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_2, 177) ,rhs_vec_4567_2, 170), _mm256_shuffle_epi32(lhs_vec_1, 85))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_2 ,_mm256_shuffle_epi32(rhs_vec_4567_2, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_2, 177) ,rhs_vec_4567_2, 170), _mm256_shuffle_epi32(lhs_vec_1, 85)); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_3 ,_mm256_shuffle_epi32(rhs_vec_4567_3, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170))); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_3, 177) ,rhs_vec_4567_3, 170), _mm256_shuffle_epi32(lhs_vec_1, 255))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_3 ,_mm256_shuffle_epi32(rhs_vec_4567_3, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_3, 177) ,rhs_vec_4567_3, 170), _mm256_shuffle_epi32(lhs_vec_1, 255)); // Accumulated values multipled with appropriate scales acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); @@ -3239,22 +3235,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version - __m512i iacc_mat_00_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, 
rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_01_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_10_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_11_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_00_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2)); - __m512i iacc_mat_01_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2)); - __m512i iacc_mat_10_sp2 = - 
_mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2)); - __m512i iacc_mat_11_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2)); + const __m512i zero = _mm512_setzero_epi32(); + __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), 
lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); + __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); @@ -3430,22 +3419,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version - __m512i iacc_mat_00_sp1 = - 
_mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_01_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_10_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_11_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_00_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2)); - __m512i iacc_mat_01_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2)), 
mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2)); - __m512i iacc_mat_10_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2)); - __m512i iacc_mat_11_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2)); + const __m512i zero = _mm512_setzero_epi32(); + __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_11_sp1 = 
mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); + __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); @@ -3605,22 +3587,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e 
corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version - __m256i iacc_mat_00_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_01_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_10_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_11_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_00_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_01_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), 
mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2)); - __m256i iacc_mat_10_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_11_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2)); + const __m256i zero = _mm256_setzero_si256(); + __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, 
lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); + __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); @@ -3769,22 +3744,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version - __m256i iacc_mat_00_sp1 = - 
_mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_01_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_10_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_11_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_00_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_01_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, 
rhs_mat_2367_0_sp2)); - __m256i iacc_mat_10_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_11_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2)); + const __m256i zero = _mm256_setzero_si256(); + __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_00_sp2 
= mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); + __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2);
diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp index 74a31abb2d6fc..ced09c05c2ccb 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp @@ -183,67 +183,63 @@ static inline __m256 __avx_rearranged_f32cx8_load(ggml_fp16_t *x, __m128i arrang #if defined(__AVX2__) || defined(__AVX512F__) #if defined(__AVX512F__) -// add int16_t pairwise and return as 512 bit int vector +static inline __m512i sum_i16_pairs_acc_int32x16(const __m512i acc, const __m512i x) { const __m512i ones = _mm512_set1_epi16(1); - return _mm512_madd_epi16(ones, x); -static inline __m512i mul_sum_us8_pairs_int32x16(const __m512i ax, const __m512i sy) { +static inline __m512i mul_sum_us8_pairs_acc_int32x16(const __m512i acc, const __m512i ax, const __m512i sy) { #if defined(__AVX512VNNI__) - const __m512i zero = _mm512_setzero_si512(); - return _mm512_dpbusd_epi32(zero, ax, sy); + return _mm512_dpbusd_epi32(acc, ax, sy); const __m512i dot = _mm512_maddubs_epi16(ax, sy); - return sum_i16_pairs_int_32x16(dot); + return sum_i16_pairs_acc_int32x16(acc, dot); +static inline __m512i mul_sum_i8_pairs_acc_int32x16(const __m512i acc, const __m512i x, const __m512i y) { const __m512i zero = _mm512_setzero_si512(); const __m512i ax = _mm512_abs_epi8(x); __mmask64 blt0 = _mm512_movepi8_mask(x); const __m512i sy = _mm512_mask_sub_epi8(y, blt0, zero, y); - return mul_sum_us8_pairs_int32x16(ax, sy); + return mul_sum_us8_pairs_acc_int32x16(acc, ax, sy); -// add int16_t pairwise and return as 256 bit int vector -static inline __m256i sum_i16_pairs_int32x8(const __m256i x) { +// add int16_t pairwise and return as 256 bit int vector, then add the accumulator const __m256i ones = _mm256_set1_epi16(1); + return _mm256_add_epi32(acc, _mm256_madd_epi16(ones, x)); -static inline __m256i mul_sum_us8_pairs_int32x8(const __m256i ax, const __m256i sy) { +static inline __m256i mul_sum_us8_pairs_acc_int32x8(const __m256i acc, const __m256i ax, 
const __m256i sy) { #if defined(__AVX512VNNI__) && defined(__AVX512VL__) - return _mm256_dpbusd_epi32(zero, ax, sy); #elif defined(__AVXVNNI__) - return _mm256_dpbusd_avx_epi32(zero, ax, sy); + return _mm256_dpbusd_avx_epi32(acc, ax, sy); const __m256i dot = _mm256_maddubs_epi16(ax, sy); - return sum_i16_pairs_int32x8(dot); + return sum_i16_pairs_acc_int32x8(acc, dot); // Integer variant of the function defined in ggml-quants.c -// multiply int8_t, add results pairwise twice and return as 256 bit int vector -static inline __m256i mul_sum_i8_pairs_int32x8(const __m256i x, const __m256i y) { -#if __AVXVNNIINT8__ - return _mm256_dpbssd_epi32(zero, x, y); +static inline __m256i mul_sum_i8_pairs_acc_int32x8(const __m256i acc, const __m256i x, const __m256i y) { +#if defined(__AVXVNNIINT8__) + return _mm256_dpbssd_epi32(acc, x, y); const __m256i ax = _mm256_sign_epi8(x, x); const __m256i sy = _mm256_sign_epi8(y, x); - return mul_sum_us8_pairs_int32x8(ax, sy); + return mul_sum_us8_pairs_acc_int32x8(acc, ax, sy); @@ -1175,17 +1171,17 @@ static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c // ........................................................................... 
// B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31) - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_0 ,_mm256_shuffle_epi32(rhs_vec_4567_0, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_0, 177) ,rhs_vec_4567_0, 170), _mm256_shuffle_epi32(lhs_vec_0, 85)); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_1, 177) ,rhs_vec_4567_1, 170), _mm256_shuffle_epi32(lhs_vec_0, 255))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_1, 177) ,rhs_vec_4567_1, 170), _mm256_shuffle_epi32(lhs_vec_0, 255)); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_2 ,_mm256_shuffle_epi32(rhs_vec_4567_2, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0))); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_2, 177) ,rhs_vec_4567_2, 170), _mm256_shuffle_epi32(lhs_vec_1, 85))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_2 ,_mm256_shuffle_epi32(rhs_vec_4567_2, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_2, 177) ,rhs_vec_4567_2, 170), _mm256_shuffle_epi32(lhs_vec_1, 85)); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_3 ,_mm256_shuffle_epi32(rhs_vec_4567_3, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170))); - iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_3, 177) ,rhs_vec_4567_3, 170), _mm256_shuffle_epi32(lhs_vec_1, 255))); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_3 ,_mm256_shuffle_epi32(rhs_vec_4567_3, 177), 170), 
_mm256_shuffle_epi32(lhs_vec_1, 170)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_3, 177) ,rhs_vec_4567_3, 170), _mm256_shuffle_epi32(lhs_vec_1, 255)); // Accumulated values multipled with appropriate scales acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); @@ -3239,22 +3235,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c - __m512i iacc_mat_00_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_01_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_10_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_11_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_00_sp2 = - 
_mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2)); - __m512i iacc_mat_01_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2)); - __m512i iacc_mat_10_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2)); - __m512i iacc_mat_11_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2)); + const __m512i zero = _mm512_setzero_epi32(); + __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, 
rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); + __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, 
rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); @@ -3430,22 +3419,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c - __m512i iacc_mat_00_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_01_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_10_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1)); - __m512i iacc_mat_11_sp1 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1)); - __m512i iacc_mat_00_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2)), 
mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2)); - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2)); - __m512i iacc_mat_10_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2)); - __m512i iacc_mat_11_sp2 = - _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(mul_sum_i8_pairs_int32x16(lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), mul_sum_i8_pairs_int32x16(lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2)), mul_sum_i8_pairs_int32x16(lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2)); + const __m512i zero = _mm512_setzero_epi32(); + __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, 
rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); + __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); @@ -3605,22 +3587,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c - __m256i iacc_mat_00_sp1 = - 
_mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_01_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_10_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_11_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_00_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_01_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, 
rhs_mat_2367_0_sp2)); - __m256i iacc_mat_10_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_11_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2)); + const __m256i zero = _mm256_setzero_si256(); + __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_00_sp2 
= mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); + __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); @@ -3769,22 +3744,15 @@ static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, c - __m256i iacc_mat_00_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_01_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1)), 
mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_10_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1)); - __m256i iacc_mat_11_sp1 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1)); - __m256i iacc_mat_00_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_01_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2)); - __m256i iacc_mat_10_sp2 = - _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2)); - __m256i iacc_mat_11_sp2 = + const __m256i zero = _mm256_setzero_si256(); + __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, 
lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); + __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); + 
__m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2);
[ "-static inline __m512i sum_i16_pairs_int_32x16(const __m512i x) {", "+// add int16_t pairwise and return as 512 bit int vector, then add the accumulator", "+ return _mm512_add_epi32(acc, _mm512_madd_epi16(ones, x));", "-// multiply int8_t, add results pairwise twice and return as 512 bit int vector", "-static inline __m512i mul_sum_i8_pairs_int32x16(const __m512i x, const __m512i y) {", "+// multiply int8_t, add results pairwise twice and return as 512 bit int vector,then add the accumulator", "+static inline __m256i sum_i16_pairs_acc_int32x8(const __m256i acc, const __m256i x) {", "- return _mm256_madd_epi16(ones, x);", "+ return _mm256_dpbusd_epi32(acc, ax, sy);", "+// multiply int8_t, add results pairwise twice and return as 256 bit int vector, then add the accumulator", "- iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_0, 177) ,rhs_vec_4567_0, 170), _mm256_shuffle_epi32(lhs_vec_0, 85)));", "+ iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_0 ,_mm256_shuffle_epi32(rhs_vec_4567_0, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0));", "- iacc = _mm256_add_epi32(iacc, mul_sum_i8_pairs_int32x8(_mm256_blend_epi32(rhs_vec_0123_1 ,_mm256_shuffle_epi32(rhs_vec_4567_1, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170)));", "+ iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_1 ,_mm256_shuffle_epi32(rhs_vec_4567_1, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170));", "- __m512i iacc_mat_01_sp2 =", "- _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(mul_sum_i8_pairs_int32x8(lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), mul_sum_i8_pairs_int32x8(lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2)), mul_sum_i8_pairs_int32x8(lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2));" ]
[ 9, 10, 14, 31, 32, 33, 49, 51, 60, 79, 98, 99, 102, 104, 165, 234 ]
{ "additions": 65, "author": "SongXiaoXi", "deletions": 97, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12773", "issue_id": 12773, "merged_at": "2025-04-14T05:47:55Z", "omission_probability": 0.1, "pr_number": 12773, "repo": "ggml-org/llama.cpp", "title": "ggml: use _mm[512/256]_dpbusd[_avx]_epi32 to directly accumulate into the result register", "total_changes": 162 }
823
diff --git a/common/chat.cpp b/common/chat.cpp index 62ca26ad7609c..bbc5f087cdcc0 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1622,7 +1622,7 @@ static common_chat_params common_chat_templates_apply_jinja( } // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools) - if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) { + if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null() && params.tools.is_array() && params.json_schema.is_null()) { return common_chat_params_init_hermes_2_pro(tmpl, params); } diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index a1034b1a41b12..a0bf6affe5220 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -569,6 +569,7 @@ static void test_template_output_parsers() { { // Not supported yet auto tmpls = read_templates("models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja"); + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); assert_equals(COMMON_CHAT_FORMAT_GENERIC, common_chat_templates_apply(tmpls.get(), inputs_tools).format); } { @@ -665,6 +666,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja"); std::vector<std::string> end_tokens{ "<|im_end|>" }; + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); assert_equals(COMMON_CHAT_FORMAT_HERMES_2_PRO, common_chat_templates_apply(tmpls.get(), inputs_tools).format); assert_equals( COMMON_CHAT_FORMAT_HERMES_2_PRO, @@ -793,6 +795,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja"); std::vector<std::string> end_tokens{ "<|eom_id|>", "<|eot_id|>" }; + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, 
common_chat_templates_apply(tmpls.get(), inputs_tools).format); assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS, common_chat_templates_apply(tmpls.get(), inputs_tools_builtin).format); @@ -815,6 +818,7 @@ static void test_template_output_parsers() { std::vector<std::string> end_tokens{ "<|eom_id|>", "<|eot_id|>" }; assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, common_chat_templates_apply(tmpls.get(), inputs_tools).format); + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); test_templates(tmpls.get(), end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false); test_templates(tmpls.get(), end_tokens, message_assist_call, tools, @@ -824,6 +828,8 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/meetkai-functionary-medium-v3.1.jinja"); std::vector<std::string> end_tokens{ "<|eom_id|>", "<|eot_id|>" }; + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, + common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1, common_chat_templates_apply(tmpls.get(), inputs_tools).format); @@ -851,6 +857,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/fireworks-ai-llama-3-firefunction-v2.jinja"); std::vector<std::string> end_tokens{ "<|eot_id|>" }; + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); assert_equals(COMMON_CHAT_FORMAT_FIREFUNCTION_V2, common_chat_templates_apply(tmpls.get(), inputs_tools).format); test_templates(tmpls.get(), end_tokens, message_assist, tools, "Hello, world!\nWhat's up?", /* expect_grammar_triggered= */ false); @@ -862,6 +869,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja"); std::vector<std::string> end_tokens{ 
"<|end▁of▁sentence|>" }; + assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_templates_apply(tmpls.get(), inputs_tools).format); assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING, common_chat_templates_apply(tmpls.get(), inputs_tools_think).format); @@ -891,6 +899,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/llama-cpp-deepseek-r1.jinja"); std::vector<std::string> end_tokens{ "<|end▁of▁sentence|>" }; + assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format); assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_templates_apply(tmpls.get(), inputs_tools).format); assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1_EXTRACT_REASONING, common_chat_templates_apply(tmpls.get(), inputs_tools_think).format);
diff --git a/common/chat.cpp b/common/chat.cpp index 62ca26ad7609c..bbc5f087cdcc0 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -1622,7 +1622,7 @@ static common_chat_params common_chat_templates_apply_jinja( // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools) - if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) { + if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null() && params.tools.is_array() && params.json_schema.is_null()) { return common_chat_params_init_hermes_2_pro(tmpl, params); diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index a1034b1a41b12..a0bf6affe5220 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -569,6 +569,7 @@ static void test_template_output_parsers() { // Not supported yet auto tmpls = read_templates("models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja"); assert_equals(COMMON_CHAT_FORMAT_GENERIC, common_chat_templates_apply(tmpls.get(), inputs_tools).format); @@ -665,6 +666,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja"); std::vector<std::string> end_tokens{ "<|im_end|>" }; assert_equals(COMMON_CHAT_FORMAT_HERMES_2_PRO, common_chat_templates_apply(tmpls.get(), inputs_tools).format); assert_equals( COMMON_CHAT_FORMAT_HERMES_2_PRO, @@ -793,6 +795,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja"); assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS, common_chat_templates_apply(tmpls.get(), inputs_tools_builtin).format); @@ -815,6 +818,7 @@ static void test_template_output_parsers() { test_templates(tmpls.get(), end_tokens, message_assist_call, tools, @@ -824,6 +828,8 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/meetkai-functionary-medium-v3.1.jinja"); 
assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1, common_chat_templates_apply(tmpls.get(), inputs_tools).format); @@ -851,6 +857,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/fireworks-ai-llama-3-firefunction-v2.jinja"); std::vector<std::string> end_tokens{ "<|eot_id|>" }; assert_equals(COMMON_CHAT_FORMAT_FIREFUNCTION_V2, common_chat_templates_apply(tmpls.get(), inputs_tools).format); @@ -862,6 +869,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja"); @@ -891,6 +899,7 @@ static void test_template_output_parsers() { auto tmpls = read_templates("models/templates/llama-cpp-deepseek-r1.jinja");
[ "+ assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY,", "+ common_chat_templates_apply(tmpls.get(), inputs_no_tools).format);" ]
[ 53, 54 ]
{ "additions": 10, "author": "ochafik", "deletions": 1, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12900", "issue_id": 12900, "merged_at": "2025-04-11T19:47:52Z", "omission_probability": 0.1, "pr_number": 12900, "repo": "ggml-org/llama.cpp", "title": "`tool-call`: fix non-tool-calling grammar crashes w/ Qwen / Hermes 2 templates", "total_changes": 11 }
824
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index a8f4bc41726c2..e1baa85f9e330 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -201,6 +201,11 @@ void main() { uint32_t q_stride = p.gqa_ratio > 1 ? (p.nb02 / 4) : p.nb01; uint32_t k_stride = p.nb11; uint32_t v_stride = p.nb21; + // When using grouped query attention, all rows use the same mask (stride 0). + // "p.gqa_ratio >> 16" is just a roundabout way of writing zero + // that prevents the compiler from folding the "&" through the select + // and breaking the alignment detection. + uint32_t m_stride = (p.gqa_ratio > 1) ? (p.gqa_ratio >> 16) : KV; // hint to the compiler that strides are aligned for the aligned variant of the shader if (Clamp != gl_CooperativeMatrixClampModeConstantNV) { @@ -209,6 +214,7 @@ void main() { k_stride &= ~7; v_stride &= ~7; #endif + m_stride &= ~7; } tensorLayoutQ = setTensorLayoutStrideNV(tensorLayoutQ, q_stride, 1); tensorLayoutK = setTensorLayoutStrideNV(tensorLayoutK, k_stride, 1); @@ -261,10 +267,7 @@ void main() { if (p.mask != 0) { tensorLayoutNV<2, Clamp> tensorLayoutM = createTensorLayoutNV(2, Clamp); tensorLayoutM = setTensorLayoutDimensionNV(tensorLayoutM, p.nem1, KV); - // When using grouped query attention, all rows use the same mask. - if (p.gqa_ratio > 1) { - tensorLayoutM = setTensorLayoutStrideNV(tensorLayoutM, 0, 1); - } + tensorLayoutM = setTensorLayoutStrideNV(tensorLayoutM, m_stride, 1); coopmat<float16_t, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> mv;
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index a8f4bc41726c2..e1baa85f9e330 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -201,6 +201,11 @@ void main() { uint32_t q_stride = p.gqa_ratio > 1 ? (p.nb02 / 4) : p.nb01; uint32_t k_stride = p.nb11; uint32_t v_stride = p.nb21; + // When using grouped query attention, all rows use the same mask (stride 0). + // "p.gqa_ratio >> 16" is just a roundabout way of writing zero + // that prevents the compiler from folding the "&" through the select + // and breaking the alignment detection. + uint32_t m_stride = (p.gqa_ratio > 1) ? (p.gqa_ratio >> 16) : KV; // hint to the compiler that strides are aligned for the aligned variant of the shader if (Clamp != gl_CooperativeMatrixClampModeConstantNV) { @@ -209,6 +214,7 @@ void main() { k_stride &= ~7; v_stride &= ~7; #endif + m_stride &= ~7; } tensorLayoutQ = setTensorLayoutStrideNV(tensorLayoutQ, q_stride, 1); tensorLayoutK = setTensorLayoutStrideNV(tensorLayoutK, k_stride, 1); @@ -261,10 +267,7 @@ void main() { if (p.mask != 0) { tensorLayoutNV<2, Clamp> tensorLayoutM = createTensorLayoutNV(2, Clamp); tensorLayoutM = setTensorLayoutDimensionNV(tensorLayoutM, p.nem1, KV); - // When using grouped query attention, all rows use the same mask. - if (p.gqa_ratio > 1) { - } + tensorLayoutM = setTensorLayoutStrideNV(tensorLayoutM, m_stride, 1); coopmat<float16_t, gl_ScopeWorkgroup, Br, Bc, gl_MatrixUseAccumulator> mv;
[ "- tensorLayoutM = setTensorLayoutStrideNV(tensorLayoutM, 0, 1);" ]
[ 30 ]
{ "additions": 7, "author": "jeffbolznv", "deletions": 4, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12853", "issue_id": 12853, "merged_at": "2025-04-12T08:44:48Z", "omission_probability": 0.1, "pr_number": 12853, "repo": "ggml-org/llama.cpp", "title": "vulkan: use aligned loads for flash attention mask", "total_changes": 11 }
825
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index a55b3f3835184..49c90b7506e73 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -323,8 +323,8 @@ struct clip_ctx { std::vector<ggml_backend_t> backend_ptrs; std::vector<ggml_backend_buffer_type_t> backend_buft; - ggml_backend_ptr backend; - ggml_backend_ptr backend_cpu; + ggml_backend_t backend; + ggml_backend_t backend_cpu; ggml_backend_buffer_ptr buf; ggml_backend_sched_ptr sched; @@ -332,27 +332,34 @@ struct clip_ctx { clip_image_size load_image_size; clip_ctx(clip_context_params & ctx_params) { - backend_cpu = ggml_backend_ptr(ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr)); - backend = ggml_backend_ptr(ctx_params.use_gpu + backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); + backend = ctx_params.use_gpu ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr) - : nullptr); + : nullptr; if (backend) { - LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend.get())); - backend_ptrs.push_back(backend.get()); - backend_buft.push_back(ggml_backend_get_default_buffer_type(backend.get())); + LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend)); + backend_ptrs.push_back(backend); + backend_buft.push_back(ggml_backend_get_default_buffer_type(backend)); } else { - backend = std::move(backend_cpu); + backend = backend_cpu; LOG_INF("%s: CLIP using CPU backend\n", __func__); } - backend_ptrs.push_back(backend_cpu.get()); - backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu.get())); + backend_ptrs.push_back(backend_cpu); + backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu)); sched.reset( ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false) ); } + + ~clip_ctx() { + ggml_backend_free(backend); + if (backend != backend_cpu) { + ggml_backend_free(backend_cpu); + } + } }; static ggml_cgraph * 
clip_image_build_graph_siglip(clip_ctx * ctx, const clip_image_f32_batch & imgs) { @@ -1428,7 +1435,7 @@ struct clip_model_loader { } // alloc memory and offload data - ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend.get()); + ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend); ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft)); ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS); for (auto & t : tensors_to_load) { @@ -2610,7 +2617,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima } } - ggml_backend_cpu_set_n_threads(ctx->backend_cpu.get(), n_threads); + ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads); auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf); if (status != GGML_STATUS_SUCCESS) {
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index a55b3f3835184..49c90b7506e73 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -323,8 +323,8 @@ struct clip_ctx { std::vector<ggml_backend_t> backend_ptrs; std::vector<ggml_backend_buffer_type_t> backend_buft; - ggml_backend_ptr backend; - ggml_backend_ptr backend_cpu; + ggml_backend_t backend; + ggml_backend_t backend_cpu; ggml_backend_buffer_ptr buf; ggml_backend_sched_ptr sched; @@ -332,27 +332,34 @@ struct clip_ctx { clip_image_size load_image_size; clip_ctx(clip_context_params & ctx_params) { - backend_cpu = ggml_backend_ptr(ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr)); - backend = ggml_backend_ptr(ctx_params.use_gpu + backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); + backend = ctx_params.use_gpu ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr) - : nullptr); + : nullptr; if (backend) { - LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend.get())); - backend_ptrs.push_back(backend.get()); - backend_buft.push_back(ggml_backend_get_default_buffer_type(backend.get())); + LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend)); + backend_ptrs.push_back(backend); + backend_buft.push_back(ggml_backend_get_default_buffer_type(backend)); } else { - backend = std::move(backend_cpu); LOG_INF("%s: CLIP using CPU backend\n", __func__); - backend_ptrs.push_back(backend_cpu.get()); - backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu.get())); + backend_ptrs.push_back(backend_cpu); + backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu)); sched.reset( ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false) ); + + ~clip_ctx() { + if (backend != backend_cpu) { + ggml_backend_free(backend_cpu); + } }; static ggml_cgraph * clip_image_build_graph_siglip(clip_ctx * ctx, const clip_image_f32_batch & 
imgs) { @@ -1428,7 +1435,7 @@ struct clip_model_loader { } // alloc memory and offload data - ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend.get()); + ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend); ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft)); ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS); for (auto & t : tensors_to_load) { @@ -2610,7 +2617,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima - ggml_backend_cpu_set_n_threads(ctx->backend_cpu.get(), n_threads); + ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads); auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf); if (status != GGML_STATUS_SUCCESS) {
[ "+ backend = backend_cpu;", "+ ggml_backend_free(backend);", "+ }" ]
[ 36, 51, 54 ]
{ "additions": 20, "author": "mattjcly", "deletions": 13, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12907", "issue_id": 12907, "merged_at": "2025-04-12T05:29:03Z", "omission_probability": 0.1, "pr_number": 12907, "repo": "ggml-org/llama.cpp", "title": "llava: Fix cpu-only clip image encoding sefault", "total_changes": 33 }
826
diff --git a/docs/build.md b/docs/build.md index aa1db9a047130..9c1314a29431b 100644 --- a/docs/build.md +++ b/docs/build.md @@ -191,7 +191,7 @@ The following compilation options are also available to tweak performance: | Option | Legal values | Default | Description | |-------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GGML_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, RDNA3). MMQ kernels are enabled by default on GPUs with int8 tensor core support. With MMQ force enabled, speed for large batch sizes will be worse but VRAM consumption will be lower. | +| GGML_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, CDNA and RDNA3+). MMQ kernels are enabled by default on GPUs with int8 tensor core support. With MMQ force enabled, speed for large batch sizes will be worse but VRAM consumption will be lower. | | GGML_CUDA_FORCE_CUBLAS | Boolean | false | Force the use of FP16 cuBLAS instead of custom matrix multiplication kernels for quantized models | | GGML_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. | | GGML_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. 
Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. | diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 954ff5f16924b..f8c55a2b869fc 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -52,7 +52,7 @@ #define GGML_CUDA_CC_IS_NVIDIA(cc) (cc < GGML_CUDA_CC_OFFSET_MTHREADS) // AMD -// GCN/CNDA, wave size is 64 +// GCN/CDNA, wave size is 64 #define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 0x803) // Tonga, Fiji, Polaris, minimum for fast fp16 #define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 0x900) // Vega56/64, minimum for fp16 dual issue #define GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 0x906) // MI50/Radeon VII, minimum for dp4a @@ -60,16 +60,18 @@ #define GGML_CUDA_CC_CDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x910) // MI210, minimum acc register renameing #define GGML_CUDA_CC_CDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x942) // MI300 -// RNDA removes MFMA, dp4a, xnack, acc registers, wave size is 32 +// RDNA removes MFMA, dp4a, xnack, acc registers, wave size is 32 #define GGML_CUDA_CC_RDNA1 (GGML_CUDA_CC_OFFSET_AMD + 0x1010) // RX 5000 #define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x1030) // RX 6000, minimum for dp4a #define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x1100) // RX 7000, minimum for WMMA +#define GGML_CUDA_CC_RDNA4 (GGML_CUDA_CC_OFFSET_AMD + 0x1200) // RX 9000 #define GGML_CUDA_CC_IS_AMD(cc) (cc >= GGML_CUDA_CC_OFFSET_AMD) #define GGML_CUDA_CC_IS_RDNA(cc) (cc >= GGML_CUDA_CC_RDNA1) #define GGML_CUDA_CC_IS_RDNA1(cc) (cc >= GGML_CUDA_CC_RDNA1 && cc < GGML_CUDA_CC_RDNA2) #define GGML_CUDA_CC_IS_RDNA2(cc) (cc >= GGML_CUDA_CC_RDNA2 && cc < GGML_CUDA_CC_RDNA3) -#define GGML_CUDA_CC_IS_RDNA3(cc) (cc >= GGML_CUDA_CC_RDNA3) +#define GGML_CUDA_CC_IS_RDNA3(cc) (cc >= GGML_CUDA_CC_RDNA3 && cc < GGML_CUDA_CC_RDNA4) +#define GGML_CUDA_CC_IS_RDNA4(cc) (cc >= GGML_CUDA_CC_RDNA4) #define GGML_CUDA_CC_IS_GCN(cc) (cc > 
GGML_CUDA_CC_OFFSET_AMD && cc < GGML_CUDA_CC_CDNA) #define GGML_CUDA_CC_IS_CDNA(cc) (cc >= GGML_CUDA_CC_CDNA && cc < GGML_CUDA_CC_RDNA1) @@ -209,9 +211,9 @@ typedef float2 dfloat2; #define FP16_MMA_AVAILABLE #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA -#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3)) +#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4)) #define FP16_MMA_AVAILABLE -#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3)) +#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4)) #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #define NEW_MMA_AVAILABLE @@ -244,14 +246,14 @@ static bool fp16_mma_available(const int cc) { return false; #else return (GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) || - GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc); + GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc); #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) } // To be used for feature selection of external libraries, e.g. cuBLAS. static bool fp16_mma_hardware_available(const int cc) { return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || - GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc); + GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc); } // Volta technically had FP16 tensor cores but they work very differently compared to Turing and later. 
@@ -409,7 +411,7 @@ static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, i #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) #if defined(CDNA) || defined(RDNA2) || defined(__gfx906__) c = __builtin_amdgcn_sdot4(a, b, c, false); -#elif defined(RDNA3) +#elif defined(RDNA3) || defined(RDNA4) c = __builtin_amdgcn_sudot4( true, a, true, b, c, false); #elif defined(RDNA1) || defined(__gfx900__) int tmp1; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 6dd5dcb85e15c..3bb472ffbfdf2 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1216,7 +1216,7 @@ static void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); - if (GGML_CUDA_CC_IS_CDNA(cc)) { + if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK( @@ -1759,7 +1759,9 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co beta = &beta_f32; } - if (GGML_CUDA_CC_IS_CDNA(ggml_cuda_info().devices[ctx.device].cc)) { + int id = ggml_cuda_get_device(); + const int cc = ggml_cuda_info().devices[id].cc; + if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { cu_compute_type = CUBLAS_COMPUTE_32F; alpha = &alpha_f32; beta = &beta_f32; @@ -1836,7 +1838,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co } #endif - if (dst->op_params[0] == GGML_PREC_DEFAULT) { + if (dst->op_params[0] == GGML_PREC_DEFAULT && cu_data_type == CUDA_R_16F) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16.get(), dst_ddf, ne_dst, main_stream); } diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu index 2c19485d51a92..b36b43d5417ba 100644 --- a/ggml/src/ggml-cuda/mmq.cu +++ b/ggml/src/ggml-cuda/mmq.cu @@ -149,5 +149,5 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return 
!fp16_mma_hardware_available(cc) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } - return (!GGML_CUDA_CC_IS_RDNA3(cc) && !GGML_CUDA_CC_IS_CDNA(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; + return (!GGML_CUDA_CC_IS_RDNA4(cc) && !GGML_CUDA_CC_IS_RDNA3(cc) && !GGML_CUDA_CC_IS_CDNA(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index ee01154254e3d..f136c41955b19 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -2577,9 +2577,9 @@ static __device__ void mul_mat_q_process_tile( template <ggml_type type, int mmq_x, int nwarps, bool need_check> #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) -#if defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) +#if defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) __launch_bounds__(WARP_SIZE*nwarps, 2) -#endif // defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) +#endif // defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) #else #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA __launch_bounds__(WARP_SIZE*nwarps, 1) diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index a7d518a574ddc..45ea30f62df08 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -54,7 +54,7 @@ enum mmvq_parameter_table_id { }; static constexpr __device__ mmvq_parameter_table_id get_device_table_id() { -#if defined(RDNA2) || defined(RDNA3) +#if defined(RDNA2) || defined(RDNA3) || defined(RDNA4) return MMVQ_PARAMETERS_RDNA2; #elif defined(GCN) || defined(CDNA) return MMVQ_PARAMETERS_GCN; @@ -64,7 +64,7 @@ static constexpr __device__ mmvq_parameter_table_id get_device_table_id() { } static __host__ mmvq_parameter_table_id get_device_table_id(int cc) { - if (GGML_CUDA_CC_IS_RDNA2(cc) || GGML_CUDA_CC_IS_RDNA3(cc)) { + if (GGML_CUDA_CC_IS_RDNA2(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { return MMVQ_PARAMETERS_RDNA2; } if 
(GGML_CUDA_CC_IS_GCN(cc) || GGML_CUDA_CC_IS_CDNA(cc)) { diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index a4c717a321cfb..3983ce5b423c0 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -151,6 +151,10 @@ #define CDNA #endif +#if defined(__GFX12__) +#define RDNA4 +#endif + #if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx1103__) || \ defined(__gfx1150__) || defined(__gfx1151__) #define RDNA3
diff --git a/docs/build.md b/docs/build.md index aa1db9a047130..9c1314a29431b 100644 --- a/docs/build.md +++ b/docs/build.md @@ -191,7 +191,7 @@ The following compilation options are also available to tweak performance: | Option | Legal values | Default | Description | |-------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GGML_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, RDNA3). MMQ kernels are enabled by default on GPUs with int8 tensor core support. With MMQ force enabled, speed for large batch sizes will be worse but VRAM consumption will be lower. | +| GGML_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, CDNA and RDNA3+). MMQ kernels are enabled by default on GPUs with int8 tensor core support. With MMQ force enabled, speed for large batch sizes will be worse but VRAM consumption will be lower. | | GGML_CUDA_FORCE_CUBLAS | Boolean | false | Force the use of FP16 cuBLAS instead of custom matrix multiplication kernels for quantized models | | GGML_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. | | GGML_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. 
Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. | diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 954ff5f16924b..f8c55a2b869fc 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -52,7 +52,7 @@ #define GGML_CUDA_CC_IS_NVIDIA(cc) (cc < GGML_CUDA_CC_OFFSET_MTHREADS) // AMD -// GCN/CNDA, wave size is 64 +// GCN/CDNA, wave size is 64 #define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 0x803) // Tonga, Fiji, Polaris, minimum for fast fp16 #define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 0x900) // Vega56/64, minimum for fp16 dual issue #define GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 0x906) // MI50/Radeon VII, minimum for dp4a @@ -60,16 +60,18 @@ #define GGML_CUDA_CC_CDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x910) // MI210, minimum acc register renameing #define GGML_CUDA_CC_CDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x942) // MI300 -// RNDA removes MFMA, dp4a, xnack, acc registers, wave size is 32 +// RDNA removes MFMA, dp4a, xnack, acc registers, wave size is 32 #define GGML_CUDA_CC_RDNA1 (GGML_CUDA_CC_OFFSET_AMD + 0x1010) // RX 5000 #define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x1030) // RX 6000, minimum for dp4a #define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x1100) // RX 7000, minimum for WMMA +#define GGML_CUDA_CC_RDNA4 (GGML_CUDA_CC_OFFSET_AMD + 0x1200) // RX 9000 #define GGML_CUDA_CC_IS_AMD(cc) (cc >= GGML_CUDA_CC_OFFSET_AMD) #define GGML_CUDA_CC_IS_RDNA(cc) (cc >= GGML_CUDA_CC_RDNA1) #define GGML_CUDA_CC_IS_RDNA1(cc) (cc >= GGML_CUDA_CC_RDNA1 && cc < GGML_CUDA_CC_RDNA2) #define GGML_CUDA_CC_IS_RDNA2(cc) (cc >= GGML_CUDA_CC_RDNA2 && cc < GGML_CUDA_CC_RDNA3) -#define GGML_CUDA_CC_IS_RDNA3(cc) (cc >= GGML_CUDA_CC_RDNA3) +#define GGML_CUDA_CC_IS_RDNA3(cc) (cc >= GGML_CUDA_CC_RDNA3 && cc < GGML_CUDA_CC_RDNA4) +#define GGML_CUDA_CC_IS_RDNA4(cc) (cc >= GGML_CUDA_CC_RDNA4) #define GGML_CUDA_CC_IS_GCN(cc) (cc > 
GGML_CUDA_CC_OFFSET_AMD && cc < GGML_CUDA_CC_CDNA) #define GGML_CUDA_CC_IS_CDNA(cc) (cc >= GGML_CUDA_CC_CDNA && cc < GGML_CUDA_CC_RDNA1) @@ -209,9 +211,9 @@ typedef float2 dfloat2; #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA +#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4)) -#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3)) #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #define NEW_MMA_AVAILABLE @@ -244,14 +246,14 @@ static bool fp16_mma_available(const int cc) { return false; return (GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) || #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN) // To be used for feature selection of external libraries, e.g. cuBLAS. static bool fp16_mma_hardware_available(const int cc) { return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || // Volta technically had FP16 tensor cores but they work very differently compared to Turing and later. 
@@ -409,7 +411,7 @@ static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, i #if defined(CDNA) || defined(RDNA2) || defined(__gfx906__) c = __builtin_amdgcn_sdot4(a, b, c, false); -#elif defined(RDNA3) +#elif defined(RDNA3) || defined(RDNA4) c = __builtin_amdgcn_sudot4( true, a, true, b, c, false); #elif defined(RDNA1) || defined(__gfx900__) int tmp1; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 6dd5dcb85e15c..3bb472ffbfdf2 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1216,7 +1216,7 @@ static void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); - if (GGML_CUDA_CC_IS_CDNA(cc)) { + if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK( @@ -1759,7 +1759,9 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co - if (GGML_CUDA_CC_IS_CDNA(ggml_cuda_info().devices[ctx.device].cc)) { + int id = ggml_cuda_get_device(); + const int cc = ggml_cuda_info().devices[id].cc; + if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { cu_compute_type = CUBLAS_COMPUTE_32F; alpha = &alpha_f32; @@ -1836,7 +1838,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co - if (dst->op_params[0] == GGML_PREC_DEFAULT) { + if (dst->op_params[0] == GGML_PREC_DEFAULT && cu_data_type == CUDA_R_16F) { const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); to_fp32_cuda(dst_f16.get(), dst_ddf, ne_dst, main_stream); diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu index 2c19485d51a92..b36b43d5417ba 100644 --- a/ggml/src/ggml-cuda/mmq.cu +++ b/ggml/src/ggml-cuda/mmq.cu @@ -149,5 +149,5 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return !fp16_mma_hardware_available(cc) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; - return (!GGML_CUDA_CC_IS_RDNA3(cc) && 
!GGML_CUDA_CC_IS_CDNA(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; + return (!GGML_CUDA_CC_IS_RDNA4(cc) && !GGML_CUDA_CC_IS_RDNA3(cc) && !GGML_CUDA_CC_IS_CDNA(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index ee01154254e3d..f136c41955b19 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -2577,9 +2577,9 @@ static __device__ void mul_mat_q_process_tile( template <ggml_type type, int mmq_x, int nwarps, bool need_check> -#if defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) +#if defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) __launch_bounds__(WARP_SIZE*nwarps, 2) -#endif // defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) +#endif // defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA __launch_bounds__(WARP_SIZE*nwarps, 1) diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index a7d518a574ddc..45ea30f62df08 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -54,7 +54,7 @@ enum mmvq_parameter_table_id { }; static constexpr __device__ mmvq_parameter_table_id get_device_table_id() { -#if defined(RDNA2) || defined(RDNA3) +#if defined(RDNA2) || defined(RDNA3) || defined(RDNA4) return MMVQ_PARAMETERS_RDNA2; #elif defined(GCN) || defined(CDNA) return MMVQ_PARAMETERS_GCN; @@ -64,7 +64,7 @@ static constexpr __device__ mmvq_parameter_table_id get_device_table_id() { static __host__ mmvq_parameter_table_id get_device_table_id(int cc) { - if (GGML_CUDA_CC_IS_RDNA2(cc) || GGML_CUDA_CC_IS_RDNA3(cc)) { return MMVQ_PARAMETERS_RDNA2; if (GGML_CUDA_CC_IS_GCN(cc) || GGML_CUDA_CC_IS_CDNA(cc)) { diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index a4c717a321cfb..3983ce5b423c0 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -151,6 +151,10 @@ #define 
CDNA +#if defined(__GFX12__) +#endif + #if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx1103__) || \ defined(__gfx1150__) || defined(__gfx1151__) #define RDNA3
[ "-#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3))", "+#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4))", "+ if (GGML_CUDA_CC_IS_RDNA2(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) {", "+#define RDNA4" ]
[ 51, 55, 163, 176 ]
{ "additions": 25, "author": "slojosic-amd", "deletions": 17, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12372", "issue_id": 12372, "merged_at": "2025-03-26T22:46:30Z", "omission_probability": 0.1, "pr_number": 12372, "repo": "ggml-org/llama.cpp", "title": "Add support for new gfx1200 and gfx1201 targets", "total_changes": 42 }
827
diff --git a/README.md b/README.md index 95a05e6ed75c1..9211ebfa1627a 100644 --- a/README.md +++ b/README.md @@ -265,7 +265,9 @@ The [Hugging Face](https://huggingface.co) platform hosts a [number of LLMs](htt - [Trending](https://huggingface.co/models?library=gguf&sort=trending) - [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf) -You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from Hugging Face by using this CLI argument: `-hf <user>/<model>[:quant]` +You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from [Hugging Face](https://huggingface.co/) or other model hosting sites, such as [ModelScope](https://modelscope.cn/), by using this CLI argument: `-hf <user>/<model>[:quant]`. + +By default, the CLI would download from Hugging Face, you can switch to other options with the environment variable `MODEL_ENDPOINT`. For example, you may opt to downloading model checkpoints from ModelScope or other model sharing communities by setting the environment variable, e.g. `MODEL_ENDPOINT=https://www.modelscope.cn/`. After downloading a model, use the CLI tools to run it locally - see below. 
diff --git a/common/arg.cpp b/common/arg.cpp index 642fefb57548f..0b57f9da1eec2 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -228,12 +228,13 @@ static bool common_download_file_single(const std::string & url, const std::stri curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str()); curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L); + http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp"); // Check if hf-token or bearer-token was specified if (!bearer_token.empty()) { std::string auth_header = "Authorization: Bearer " + bearer_token; http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str()); - curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr); } + curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr); #if defined(_WIN32) // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of @@ -544,7 +545,10 @@ static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_ curl_ptr curl(curl_easy_init(), &curl_easy_cleanup); curl_slist_ptr http_headers; std::string res_str; - std::string url = "https://huggingface.co/v2/" + hf_repo + "/manifests/" + tag; + + std::string model_endpoint = get_model_endpoint(); + + std::string url = model_endpoint + "v2/" + hf_repo + "/manifests/" + tag; curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str()); curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data); @@ -659,13 +663,8 @@ static void common_params_handle_model( } } - std::string hf_endpoint = "https://huggingface.co/"; - const char * hf_endpoint_env = getenv("HF_ENDPOINT"); - if (hf_endpoint_env) { - hf_endpoint = hf_endpoint_env; - if (hf_endpoint.back() != '/') hf_endpoint += '/'; - } - model.url = hf_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file; + std::string model_endpoint = get_model_endpoint(); + model.url = model_endpoint + model.hf_repo + 
"/resolve/main/" + model.hf_file; // make sure model path is present (for caching purposes) if (model.path.empty()) { // this is to avoid different repo having same file name, or same file name in different subdirs diff --git a/common/common.cpp b/common/common.cpp index d4882c5123cce..4e1a020d017a1 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1027,6 +1027,19 @@ struct common_init_result common_init_from_params(common_params & params) { return iparams; } +std::string get_model_endpoint() { + const char * model_endpoint_env = getenv("MODEL_ENDPOINT"); + // We still respect the use of environment-variable "HF_ENDPOINT" for backward-compatibility. + const char * hf_endpoint_env = getenv("HF_ENDPOINT"); + const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env; + std::string model_endpoint = "https://huggingface.co/"; + if (endpoint_env) { + model_endpoint = endpoint_env; + if (model_endpoint.back() != '/') model_endpoint += '/'; + } + return model_endpoint; +} + void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) { llama_clear_adapter_lora(ctx); for (auto & la : lora) { diff --git a/common/common.h b/common/common.h index 725b5123d24f9..e6eaa8e80cf05 100644 --- a/common/common.h +++ b/common/common.h @@ -543,6 +543,8 @@ struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_p // clear LoRA adapters from context, then apply new list of adapters void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora); +std::string get_model_endpoint(); + // // Batch utils // diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 68e94b0b3c3f8..e63c2aac3314e 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -697,8 +697,10 @@ class LlamaData { std::vector<std::string> headers = { "User-Agent: llama-cpp", "Accept: application/json" }; std::string url; + std::string model_endpoint = get_model_endpoint(); + 
if (pos == std::string::npos) { - auto [model_name, manifest_url] = extract_model_and_tag(model, "https://huggingface.co/v2/"); + auto [model_name, manifest_url] = extract_model_and_tag(model, model_endpoint + "v2/"); hfr = model_name; nlohmann::json manifest; @@ -713,7 +715,7 @@ class LlamaData { hff = model.substr(pos + 1); } - url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff; + url = model_endpoint + hfr + "/resolve/main/" + hff; return download(url, bn, true, headers); }
diff --git a/README.md b/README.md index 95a05e6ed75c1..9211ebfa1627a 100644 --- a/README.md +++ b/README.md @@ -265,7 +265,9 @@ The [Hugging Face](https://huggingface.co) platform hosts a [number of LLMs](htt - [Trending](https://huggingface.co/models?library=gguf&sort=trending) - [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf) -You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from Hugging Face by using this CLI argument: `-hf <user>/<model>[:quant]` +You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from [Hugging Face](https://huggingface.co/) or other model hosting sites, such as [ModelScope](https://modelscope.cn/), by using this CLI argument: `-hf <user>/<model>[:quant]`. +By default, the CLI would download from Hugging Face, you can switch to other options with the environment variable `MODEL_ENDPOINT`. For example, you may opt to downloading model checkpoints from ModelScope or other model sharing communities by setting the environment variable, e.g. `MODEL_ENDPOINT=https://www.modelscope.cn/`. After downloading a model, use the CLI tools to run it locally - see below. 
diff --git a/common/arg.cpp b/common/arg.cpp index 642fefb57548f..0b57f9da1eec2 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -228,12 +228,13 @@ static bool common_download_file_single(const std::string & url, const std::stri curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L); + http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp"); // Check if hf-token or bearer-token was specified if (!bearer_token.empty()) { std::string auth_header = "Authorization: Bearer " + bearer_token; http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str()); - curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr); + curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr); #if defined(_WIN32) // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of @@ -544,7 +545,10 @@ static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_ curl_ptr curl(curl_easy_init(), &curl_easy_cleanup); curl_slist_ptr http_headers; std::string res_str; - std::string url = "https://huggingface.co/v2/" + hf_repo + "/manifests/" + tag; + std::string model_endpoint = get_model_endpoint(); + std::string url = model_endpoint + "v2/" + hf_repo + "/manifests/" + tag; curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data); @@ -659,13 +663,8 @@ static void common_params_handle_model( } } - std::string hf_endpoint = "https://huggingface.co/"; - const char * hf_endpoint_env = getenv("HF_ENDPOINT"); - if (hf_endpoint_env) { - hf_endpoint = hf_endpoint_env; - if (hf_endpoint.back() != '/') hf_endpoint += '/'; - model.url = hf_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file; + std::string model_endpoint = get_model_endpoint(); + model.url = model_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file; // make sure model path is present (for caching purposes) if (model.path.empty()) { // this is to 
avoid different repo having same file name, or same file name in different subdirs diff --git a/common/common.cpp b/common/common.cpp index d4882c5123cce..4e1a020d017a1 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1027,6 +1027,19 @@ struct common_init_result common_init_from_params(common_params & params) { return iparams; } +std::string get_model_endpoint() { + const char * model_endpoint_env = getenv("MODEL_ENDPOINT"); + // We still respect the use of environment-variable "HF_ENDPOINT" for backward-compatibility. + const char * hf_endpoint_env = getenv("HF_ENDPOINT"); + const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env; + std::string model_endpoint = "https://huggingface.co/"; + if (endpoint_env) { + model_endpoint = endpoint_env; + if (model_endpoint.back() != '/') model_endpoint += '/'; + } + return model_endpoint; +} void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) { llama_clear_adapter_lora(ctx); for (auto & la : lora) { diff --git a/common/common.h b/common/common.h index 725b5123d24f9..e6eaa8e80cf05 100644 --- a/common/common.h +++ b/common/common.h @@ -543,6 +543,8 @@ struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_p // clear LoRA adapters from context, then apply new list of adapters void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora); +std::string get_model_endpoint(); // Batch utils diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 68e94b0b3c3f8..e63c2aac3314e 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -697,8 +697,10 @@ class LlamaData { std::vector<std::string> headers = { "User-Agent: llama-cpp", "Accept: application/json" }; std::string url; + std::string model_endpoint = get_model_endpoint(); if (pos == std::string::npos) { - auto [model_name, manifest_url] = extract_model_and_tag(model, "https://huggingface.co/v2/"); hfr = 
model_name; nlohmann::json manifest; @@ -713,7 +715,7 @@ class LlamaData { hff = model.substr(pos + 1); } - url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff; + url = model_endpoint + hfr + "/resolve/main/" + hff; return download(url, bn, true, headers);
[ "- }", "+ auto [model_name, manifest_url] = extract_model_and_tag(model, model_endpoint + \"v2/\");" ]
[ 55, 111 ]
{ "additions": 30, "author": "tastelikefeet", "deletions": 12, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12664", "issue_id": 12664, "merged_at": "2025-04-11T12:01:57Z", "omission_probability": 0.1, "pr_number": 12664, "repo": "ggml-org/llama.cpp", "title": "contrib: support modelscope community", "total_changes": 42 }
828
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 9eba3f6a42b5e..114cbf83ed33b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -36,13 +36,13 @@ jobs: matrix: config: # Multi-stage build - - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: false} - - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: true} - - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} + - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: false } + - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false } + - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true } + - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false } + - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false } # Note: the rocm images are failing due to a compiler error and are disabled until this is fixed to allow the workflow to complete - #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, 
server: true, freediskspace: true } + #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: true } steps: - name: Check out the repo uses: actions/checkout@v4
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 9eba3f6a42b5e..114cbf83ed33b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -36,13 +36,13 @@ jobs: matrix: config: # Multi-stage build - - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: false} - - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: true} - - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - - { tag: "vulkan", dockerfile: ".devops/vulkan.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} + - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, free_disk_space: false } + - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false } + - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: true } + - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, free_disk_space: false } # Note: the rocm images are failing due to a compiler error and are disabled until this is fixed to allow the workflow to complete - #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: true } + #- {tag: "rocm", dockerfile: ".devops/rocm.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, 
server: true, free_disk_space: true } steps: - name: Check out the repo uses: actions/checkout@v4
[ "+ - { tag: \"vulkan\", dockerfile: \".devops/vulkan.Dockerfile\", platforms: \"linux/amd64\", full: true, light: true, server: true, free_disk_space: false }" ]
[ 17 ]
{ "additions": 6, "author": "yeahdongcn", "deletions": 6, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12861", "issue_id": 12861, "merged_at": "2025-04-11T07:26:17Z", "omission_probability": 0.1, "pr_number": 12861, "repo": "ggml-org/llama.cpp", "title": "Replace freediskspace to free_disk_space in docker.yml", "total_changes": 12 }
829
diff --git a/build-xcframework.sh b/build-xcframework.sh index 1b9091d288cc8..97001b5f7ff85 100755 --- a/build-xcframework.sh +++ b/build-xcframework.sh @@ -41,6 +41,11 @@ COMMON_CMAKE_ARGS=( -DGGML_OPENMP=${GGML_OPENMP} ) +XCODE_VERSION=$(xcodebuild -version 2>/dev/null | head -n1 | awk '{ print $2 }') +MAJOR_VERSION=$(echo $XCODE_VERSION | cut -d. -f1) +MINOR_VERSION=$(echo $XCODE_VERSION | cut -d. -f2) +echo "Detected Xcode version: $XCODE_VERSION" + check_required_tool() { local tool=$1 local install_message=$2 @@ -325,21 +330,28 @@ combine_static_libraries() { # Platform-specific post-processing for device builds if [[ "$is_simulator" == "false" ]]; then - if command -v vtool &>/dev/null; then + if command -v xcrun vtool &>/dev/null; then case "$platform" in "ios") echo "Marking binary as a framework binary for iOS..." - vtool -set-build-version ios ${IOS_MIN_OS_VERSION} ${IOS_MIN_OS_VERSION} -replace \ + xcrun vtool -set-build-version ios ${IOS_MIN_OS_VERSION} ${IOS_MIN_OS_VERSION} -replace \ -output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}" ;; "visionos") echo "Marking binary as a framework binary for visionOS..." - vtool -set-build-version xros ${VISIONOS_MIN_OS_VERSION} ${VISIONOS_MIN_OS_VERSION} -replace \ + if [[ "$MAJOR_VERSION" -gt 16 ]] || [[ "$MAJOR_VERSION" -eq 16 && "$MINOR_VERSION" -gt 2 ]]; then + echo "Xcode version greater than 16.2, using visionOS." + VISION_OS_BUILD_VERSION="visionos" + else + echo "Xcode version less than or equal to 16.2, using xros." + VISION_OS_BUILD_VERSION="xros" + fi + xcrun vtool -set-build-version ${VISION_OS_BUILD_VERSION} ${VISIONOS_MIN_OS_VERSION} ${VISIONOS_MIN_OS_VERSION} -replace \ -output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}" ;; "tvos") echo "Marking binary as a framework binary for tvOS..." 
- vtool -set-build-version tvos ${TVOS_MIN_OS_VERSION} ${TVOS_MIN_OS_VERSION} -replace \ + xcrun vtool -set-build-version tvos ${TVOS_MIN_OS_VERSION} ${TVOS_MIN_OS_VERSION} -replace \ -output "${base_dir}/${output_lib}" "${base_dir}/${output_lib}" ;; esac
diff --git a/build-xcframework.sh b/build-xcframework.sh index 1b9091d288cc8..97001b5f7ff85 100755 --- a/build-xcframework.sh +++ b/build-xcframework.sh @@ -41,6 +41,11 @@ COMMON_CMAKE_ARGS=( -DGGML_OPENMP=${GGML_OPENMP} ) +MAJOR_VERSION=$(echo $XCODE_VERSION | cut -d. -f1) +MINOR_VERSION=$(echo $XCODE_VERSION | cut -d. -f2) +echo "Detected Xcode version: $XCODE_VERSION" + check_required_tool() { local tool=$1 local install_message=$2 @@ -325,21 +330,28 @@ combine_static_libraries() { # Platform-specific post-processing for device builds if [[ "$is_simulator" == "false" ]]; then - if command -v vtool &>/dev/null; then + if command -v xcrun vtool &>/dev/null; then case "$platform" in "ios") echo "Marking binary as a framework binary for iOS..." - vtool -set-build-version ios ${IOS_MIN_OS_VERSION} ${IOS_MIN_OS_VERSION} -replace \ + xcrun vtool -set-build-version ios ${IOS_MIN_OS_VERSION} ${IOS_MIN_OS_VERSION} -replace \ "visionos") echo "Marking binary as a framework binary for visionOS..." - vtool -set-build-version xros ${VISIONOS_MIN_OS_VERSION} ${VISIONOS_MIN_OS_VERSION} -replace \ + if [[ "$MAJOR_VERSION" -gt 16 ]] || [[ "$MAJOR_VERSION" -eq 16 && "$MINOR_VERSION" -gt 2 ]]; then + echo "Xcode version greater than 16.2, using visionOS." + else + echo "Xcode version less than or equal to 16.2, using xros." + VISION_OS_BUILD_VERSION="xros" + fi + xcrun vtool -set-build-version ${VISION_OS_BUILD_VERSION} ${VISIONOS_MIN_OS_VERSION} ${VISIONOS_MIN_OS_VERSION} -replace \ "tvos") echo "Marking binary as a framework binary for tvOS..." - vtool -set-build-version tvos ${TVOS_MIN_OS_VERSION} ${TVOS_MIN_OS_VERSION} -replace \ esac
[ "+XCODE_VERSION=$(xcodebuild -version 2>/dev/null | head -n1 | awk '{ print $2 }')", "+ VISION_OS_BUILD_VERSION=\"visionos\"", "+ xcrun vtool -set-build-version tvos ${TVOS_MIN_OS_VERSION} ${TVOS_MIN_OS_VERSION} -replace \\" ]
[ 8, 34, 45 ]
{ "additions": 16, "author": "danbev", "deletions": 4, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12854", "issue_id": 12854, "merged_at": "2025-04-11T07:24:34Z", "omission_probability": 0.1, "pr_number": 12854, "repo": "ggml-org/llama.cpp", "title": "xcf : add check for visionos build version", "total_changes": 20 }
830
diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 656dc987780c9..2d70f3cc20b09 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1788,10 +1788,6 @@ def set_gguf_parameters(self): self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None): - name = name.replace("language_model.", "") - name = name.replace("feed_forward.", "mlp.") # a bit hacky for now - name = name.replace(".router.weight", ".gate.weight") # a bit hacky for now - # split the gate_up into gate and up if "gate_up_proj" in name: name_up = name.replace("gate_up_proj", "up_proj.weight") diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 50bef12e3dbe7..a9e681f8e6765 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -30,6 +30,7 @@ class TensorNameMap: "rwkv.embeddings", # rwkv6 "model.embeddings", # rwkv7 "model.word_embeddings", # bailingmoe + "language_model.model.embed_tokens", # llama4 ), # Token type embeddings @@ -67,6 +68,7 @@ class TensorNameMap: "output_layer", # chatglm "head", # rwkv "head.out", # wavtokenizer + "language_model.lm_head", # llama4 ), # Output norm @@ -89,6 +91,7 @@ class TensorNameMap: "rwkv.ln_out", # rwkv6 "model.ln_out", # rwkv7 "backbone.final_layer_norm", # wavtokenizer + "language_model.model.norm", # llama4 ), # Rope frequencies @@ -130,6 +133,7 @@ class TensorNameMap: "transformer.layers.{bid}.attn_norm", # openelm "rwkv.blocks.{bid}.ln1", # rwkv6 "model.layers.{bid}.ln1", # rwkv7 + "language_model.model.layers.{bid}.input_layernorm", # llama4 ), # Attention norm 2 @@ -169,6 +173,7 @@ class TensorNameMap: "model.layers.{bid}.attention.wq", # internlm2 "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok "transformer.h.{bid}.attn.attention.q_proj", # exaone + "language_model.model.layers.{bid}.self_attn.q_proj", # llama4 ), # Attention key @@ -183,6 +188,7 @@ 
class TensorNameMap: "model.layers.{bid}.attention.wk", # internlm2 "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok "transformer.h.{bid}.attn.attention.k_proj", # exaone + "language_model.model.layers.{bid}.self_attn.k_proj", # llama4 ), # Attention value @@ -196,6 +202,7 @@ class TensorNameMap: "model.layers.{bid}.attention.wv", # internlm2 "transformer.decoder_layer.{bid}.multi_head_attention.value",# Grok "transformer.h.{bid}.attn.attention.v_proj", # exaone + "language_model.model.layers.{bid}.self_attn.v_proj", # llama4 ), # Attention output @@ -222,6 +229,7 @@ class TensorNameMap: "encoder.layers.{bid}.self_attention.dense", # chatglm "transformer.layers.{bid}.attn.out_proj", # openelm "transformer.h.{bid}.attn.attention.out_proj", # exaone + "language_model.model.layers.{bid}.self_attn.o_proj", # llama4 ), # Attention output norm @@ -259,6 +267,7 @@ class TensorNameMap: "transformer.decoder_layer.{bid}.rms_norm_2", # Grok "encoder.layers.{bid}.post_attention_layernorm", # chatglm "transformer.layers.{bid}.ffn_norm", # openelm + "language_model.model.layers.{bid}.post_attention_layernorm", # llama4 ), # Post feed-forward norm @@ -278,6 +287,7 @@ class TensorNameMap: "transformer.decoder_layer.{bid}.router", # Grok "transformer.blocks.{bid}.ffn.router.layer", # dbrx "model.layers.{bid}.block_sparse_moe.router.layer", # granitemoe + "language_model.model.layers.{bid}.feed_forward.router", # llama4 ), MODEL_TENSOR.FFN_GATE_INP_SHEXP: ( @@ -315,6 +325,7 @@ class TensorNameMap: "model.layers.{bid}.residual_mlp.w3", # arctic "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm "transformer.h.{bid}.mlp.c_fc_1", # exaone + "language_model.model.layers.{bid}.feed_forward.up_proj", # llama4 ), MODEL_TENSOR.FFN_UP_EXP: ( @@ -323,11 +334,13 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.experts.w3", # phimoe (merged) + 
"language_model.model.layers.{bid}.feed_forward.experts.up_proj", # llama4 ), MODEL_TENSOR.FFN_UP_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek deepseek2 + "language_model.model.layers.{bid}.feed_forward.shared_expert.up_proj", # llama4 ), # AWQ-activation gate @@ -348,6 +361,7 @@ class TensorNameMap: "transformer.h.{bid}.mlp.linear_1", # refact "model.layers.{bid}.residual_mlp.w1", # arctic "transformer.h.{bid}.mlp.c_fc_0", # exaone + "language_model.model.layers.{bid}.feed_forward.gate_proj", # llama4 ), MODEL_TENSOR.FFN_GATE_EXP: ( @@ -356,11 +370,13 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.experts.w1", # phimoe (merged) + "language_model.model.layers.{bid}.feed_forward.experts.gate_proj", # llama4 ), MODEL_TENSOR.FFN_GATE_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek deepseek2 + "language_model.model.layers.{bid}.feed_forward.shared_expert.gate_proj", # llama4 ), # Feed-forward down @@ -389,6 +405,7 @@ class TensorNameMap: "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2 "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm "model.layers.h.{bid}.mlp.c_proj", # exaone + "language_model.model.layers.{bid}.feed_forward.down_proj", # llama4 ), MODEL_TENSOR.FFN_DOWN_EXP: ( @@ -398,11 +415,13 @@ class TensorNameMap: "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.output_linear", # granitemoe "model.layers.{bid}.block_sparse_moe.experts.w2", # phimoe (merged) + "language_model.model.layers.{bid}.feed_forward.experts.down_proj", # llama4 ), MODEL_TENSOR.FFN_DOWN_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek deepseek2 + 
"language_model.model.layers.{bid}.feed_forward.shared_expert.down_proj", # llama4 ), MODEL_TENSOR.ATTN_Q_NORM: (
diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 656dc987780c9..2d70f3cc20b09 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1788,10 +1788,6 @@ def set_gguf_parameters(self): self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None): - name = name.replace("feed_forward.", "mlp.") # a bit hacky for now - name = name.replace(".router.weight", ".gate.weight") # a bit hacky for now - # split the gate_up into gate and up if "gate_up_proj" in name: name_up = name.replace("gate_up_proj", "up_proj.weight") diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 50bef12e3dbe7..a9e681f8e6765 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -30,6 +30,7 @@ class TensorNameMap: "rwkv.embeddings", # rwkv6 "model.embeddings", # rwkv7 "model.word_embeddings", # bailingmoe + "language_model.model.embed_tokens", # llama4 # Token type embeddings @@ -67,6 +68,7 @@ class TensorNameMap: "output_layer", # chatglm "head", # rwkv "head.out", # wavtokenizer + "language_model.lm_head", # llama4 # Output norm @@ -89,6 +91,7 @@ class TensorNameMap: "rwkv.ln_out", # rwkv6 "model.ln_out", # rwkv7 "backbone.final_layer_norm", # wavtokenizer + "language_model.model.norm", # llama4 # Rope frequencies @@ -130,6 +133,7 @@ class TensorNameMap: "transformer.layers.{bid}.attn_norm", # openelm "rwkv.blocks.{bid}.ln1", # rwkv6 "model.layers.{bid}.ln1", # rwkv7 + "language_model.model.layers.{bid}.input_layernorm", # llama4 # Attention norm 2 @@ -169,6 +173,7 @@ class TensorNameMap: "model.layers.{bid}.attention.wq", # internlm2 "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok "transformer.h.{bid}.attn.attention.q_proj", # exaone + "language_model.model.layers.{bid}.self_attn.q_proj", # llama4 # Attention key @@ -183,6 +188,7 @@ class TensorNameMap: "model.layers.{bid}.attention.wk", # 
internlm2 "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok "transformer.h.{bid}.attn.attention.k_proj", # exaone + "language_model.model.layers.{bid}.self_attn.k_proj", # llama4 # Attention value @@ -196,6 +202,7 @@ class TensorNameMap: "model.layers.{bid}.attention.wv", # internlm2 "transformer.decoder_layer.{bid}.multi_head_attention.value",# Grok "transformer.h.{bid}.attn.attention.v_proj", # exaone + "language_model.model.layers.{bid}.self_attn.v_proj", # llama4 # Attention output @@ -222,6 +229,7 @@ class TensorNameMap: "encoder.layers.{bid}.self_attention.dense", # chatglm "transformer.layers.{bid}.attn.out_proj", # openelm "transformer.h.{bid}.attn.attention.out_proj", # exaone # Attention output norm @@ -259,6 +267,7 @@ class TensorNameMap: "transformer.decoder_layer.{bid}.rms_norm_2", # Grok "encoder.layers.{bid}.post_attention_layernorm", # chatglm "transformer.layers.{bid}.ffn_norm", # openelm # Post feed-forward norm @@ -278,6 +287,7 @@ class TensorNameMap: "transformer.decoder_layer.{bid}.router", # Grok "transformer.blocks.{bid}.ffn.router.layer", # dbrx "model.layers.{bid}.block_sparse_moe.router.layer", # granitemoe MODEL_TENSOR.FFN_GATE_INP_SHEXP: ( @@ -315,6 +325,7 @@ class TensorNameMap: "model.layers.{bid}.residual_mlp.w3", # arctic "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm "transformer.h.{bid}.mlp.c_fc_1", # exaone MODEL_TENSOR.FFN_UP_EXP: ( @@ -323,11 +334,13 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.experts.w3", # phimoe (merged) + "language_model.model.layers.{bid}.feed_forward.experts.up_proj", # llama4 MODEL_TENSOR.FFN_UP_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek deepseek2 + "language_model.model.layers.{bid}.feed_forward.shared_expert.up_proj", # llama4 # AWQ-activation gate @@ -348,6 
+361,7 @@ class TensorNameMap: "transformer.h.{bid}.mlp.linear_1", # refact "model.layers.{bid}.residual_mlp.w1", # arctic "transformer.h.{bid}.mlp.c_fc_0", # exaone + "language_model.model.layers.{bid}.feed_forward.gate_proj", # llama4 MODEL_TENSOR.FFN_GATE_EXP: ( @@ -356,11 +370,13 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.experts.w1", # phimoe (merged) + "language_model.model.layers.{bid}.feed_forward.experts.gate_proj", # llama4 MODEL_TENSOR.FFN_GATE_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek deepseek2 # Feed-forward down @@ -389,6 +405,7 @@ class TensorNameMap: "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2 "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm "model.layers.h.{bid}.mlp.c_proj", # exaone + "language_model.model.layers.{bid}.feed_forward.down_proj", # llama4 MODEL_TENSOR.FFN_DOWN_EXP: ( @@ -398,11 +415,13 @@ class TensorNameMap: "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.output_linear", # granitemoe "model.layers.{bid}.block_sparse_moe.experts.w2", # phimoe (merged) + "language_model.model.layers.{bid}.feed_forward.experts.down_proj", # llama4 MODEL_TENSOR.FFN_DOWN_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek deepseek2 MODEL_TENSOR.ATTN_Q_NORM: (
[ "- name = name.replace(\"language_model.\", \"\")", "+ \"language_model.model.layers.{bid}.self_attn.o_proj\", # llama4", "+ \"language_model.model.layers.{bid}.post_attention_layernorm\", # llama4", "+ \"language_model.model.layers.{bid}.feed_forward.router\", # llama4", "+ \"language_model.model.layers.{bid}.feed_forward.up_proj\", # llama4", "+ \"language_model.model.layers.{bid}.feed_forward.shared_expert.gate_proj\", # llama4", "+ \"language_model.model.layers.{bid}.feed_forward.shared_expert.down_proj\", # llama4" ]
[ 8, 79, 87, 95, 103, 139, 161 ]
{ "additions": 19, "author": "ngxson", "deletions": 4, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12870", "issue_id": 12870, "merged_at": "2025-04-11T07:23:37Z", "omission_probability": 0.1, "pr_number": 12870, "repo": "ggml-org/llama.cpp", "title": "convert : proper tensor name mapping for llama4", "total_changes": 23 }
831
diff --git a/src/llama-model.cpp b/src/llama-model.cpp index a442abeb85392..f251be73c5dc9 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -256,7 +256,7 @@ static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hpara return nullptr; } -// CPU: ACCEL -> CPU extra -> GPU host -> CPU +// CPU: ACCEL -> GPU host -> CPU extra -> CPU static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices) { buft_list_t buft_list; @@ -272,32 +272,6 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de } } - bool has_gpu_device = false; - for (auto * dev : devices) { - if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) { - has_gpu_device = true; - break; - } - } - - // add extra buffer types, only if no GPU device is present - // ref: https://github.com/ggml-org/llama.cpp/issues/12481#issuecomment-2743136094 - if (!has_gpu_device) { - auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); - auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev); - auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t) - ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts"); - if (ggml_backend_dev_get_extra_bufts_fn) { - ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev); - while (extra_bufts && *extra_bufts) { - buft_list.emplace_back(cpu_dev, *extra_bufts); - ++extra_bufts; - } - } - } else { - LLAMA_LOG_WARN("%s: disabling extra buffer types (i.e. 
repacking) since a GPU device is available\n", __func__); - } - // add a host buffer type // storing the tensors in a host buffer is useful when the processing of large batches // is offloaded to a GPU device, since it reduces the time spent on data transfers @@ -312,6 +286,20 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de } } + // add extra buffer types, only if no GPU device is present + // ref: https://github.com/ggml-org/llama.cpp/issues/12481#issuecomment-2743136094 + auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev); + auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t) + ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts"); + if (ggml_backend_dev_get_extra_bufts_fn) { + ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev); + while (extra_bufts && *extra_bufts) { + buft_list.emplace_back(cpu_dev, *extra_bufts); + ++extra_bufts; + } + } + // add the CPU buffer type for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { ggml_backend_dev_t dev = ggml_backend_dev_get(i);
diff --git a/src/llama-model.cpp b/src/llama-model.cpp index a442abeb85392..f251be73c5dc9 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -256,7 +256,7 @@ static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hpara return nullptr; } -// CPU: ACCEL -> CPU extra -> GPU host -> CPU +// CPU: ACCEL -> GPU host -> CPU extra -> CPU static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices) { buft_list_t buft_list; @@ -272,32 +272,6 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de - bool has_gpu_device = false; - for (auto * dev : devices) { - if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) { - has_gpu_device = true; - break; - // add extra buffer types, only if no GPU device is present - if (!has_gpu_device) { - auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); - auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev); - auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t) - ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts"); - if (ggml_backend_dev_get_extra_bufts_fn) { - ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev); - while (extra_bufts && *extra_bufts) { - buft_list.emplace_back(cpu_dev, *extra_bufts); - } - } else { // add a host buffer type // storing the tensors in a host buffer is useful when the processing of large batches // is offloaded to a GPU device, since it reduces the time spent on data transfers @@ -312,6 +286,20 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de + // add extra buffer types, only if no GPU device is present + // ref: https://github.com/ggml-org/llama.cpp/issues/12481#issuecomment-2743136094 + auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev); + auto ggml_backend_dev_get_extra_bufts_fn = 
(ggml_backend_dev_get_extra_bufts_t) + ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts"); + if (ggml_backend_dev_get_extra_bufts_fn) { + ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev); + while (extra_bufts && *extra_bufts) { + buft_list.emplace_back(cpu_dev, *extra_bufts); + ++extra_bufts; + } + } + // add the CPU buffer type for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { ggml_backend_dev_t dev = ggml_backend_dev_get(i);
[ "- // ref: https://github.com/ggml-org/llama.cpp/issues/12481#issuecomment-2743136094", "- ++extra_bufts;", "- LLAMA_LOG_WARN(\"%s: disabling extra buffer types (i.e. repacking) since a GPU device is available\\n\", __func__);" ]
[ 26, 36, 40 ]
{ "additions": 15, "author": "Djip007", "deletions": 27, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12632", "issue_id": 12632, "merged_at": "2025-03-29T13:07:37Z", "omission_probability": 0.1, "pr_number": 12632, "repo": "ggml-org/llama.cpp", "title": "change cpu_buft_list order: ACCEL -> GPU host -> CPU extra -> CPU", "total_changes": 42 }
832
diff --git a/ggml/src/ggml-cann/acl_tensor.cpp b/ggml/src/ggml-cann/acl_tensor.cpp index 9b6553c500129..f5462c5a18e37 100644 --- a/ggml/src/ggml-cann/acl_tensor.cpp +++ b/ggml/src/ggml-cann/acl_tensor.cpp @@ -41,6 +41,8 @@ aclDataType ggml_cann_type_mapping(ggml_type type) { return ACL_INT4; case GGML_TYPE_Q8_0: return ACL_INT8; + case GGML_TYPE_I64: + return ACL_INT64; default: return ACL_DT_UNDEFINED; } diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index 25b2599c7bf6a..37d4117972358 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -59,6 +59,11 @@ #include <aclnnop/aclnn_div.h> #include <aclnnop/aclnn_convolution.h> #include <aclnnop/aclnn_elu.h> +#include <aclnnop/aclnn_log.h> +#include <aclnnop/aclnn_mean.h> +#include <aclnnop/aclnn_reflection_pad1d.h> +#include <aclnnop/aclnn_eq_tensor.h> +#include <aclnnop/aclnn_gt_scalar.h> #include <float.h> #include <cmath> @@ -2598,6 +2603,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { aclTensor* acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3); GGML_CANN_CALL_ACLNN_OP(ArgMax, acl_src, 3, false, acl_dst); + ACL_CHECK(aclDestroyTensor(acl_src)); ACL_CHECK(aclDestroyTensor(acl_dst)); } @@ -2629,6 +2635,9 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds ACL_CHECK(aclDestroyTensor(acl_weight)); ACL_CHECK(aclDestroyTensor(acl_dst)); + ACL_CHECK(aclDestroyIntArray(stride)); + ACL_CHECK(aclDestroyIntArray(padding)); + ACL_CHECK(aclDestroyIntArray(dilation)); } void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){ @@ -2646,4 +2655,79 @@ void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){ ACL_CHECK(aclDestroyTensor(acl_input)); ACL_CHECK(aclDestroyTensor(acl_dst)); + ACL_CHECK(aclDestroyScalar(alpha)); +} + +void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + ggml_tensor * src0 = dst->src[0]; + + aclTensor* acl_src = 
ggml_cann_create_tensor(src0); + aclTensor* acl_dst = ggml_cann_create_tensor(dst); + + int64_t reduceDimValue[] = {3}; + aclIntArray* reduceDim = aclCreateIntArray(reduceDimValue, 1); + bool keepDim = true; + + GGML_CANN_CALL_ACLNN_OP(Mean, acl_src, reduceDim, keepDim, ACL_FLOAT, acl_dst); + + ACL_CHECK(aclDestroyTensor(acl_src)); + ACL_CHECK(aclDestroyTensor(acl_dst)); + ACL_CHECK(aclDestroyIntArray(reduceDim)); +} + +void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + ggml_tensor * src0 = dst->src[0]; + int32_t *opts = (int32_t *) dst->op_params; + int64_t paddingsArray[2] = {opts[0], opts[1]}; + aclIntArray* paddings = aclCreateIntArray(paddingsArray, 2); + + for (int64_t i = 0; i < src0->ne[3]; i++) { + aclTensor* acl_src = ggml_cann_create_tensor( + (char*)src0->data + i * src0->ne[3], + ggml_cann_type_mapping(src0->type), ggml_element_size(src0), + src0->ne, src0->nb, 3); + + aclTensor* acl_dst = ggml_cann_create_tensor( + (char*)dst->data + i * src0->ne[3], + ggml_cann_type_mapping(dst->type), ggml_element_size(dst), + dst->ne, dst->nb, 3); + + GGML_CANN_CALL_ACLNN_OP(ReflectionPad1d, acl_src, paddings, acl_dst); + + ACL_CHECK(aclDestroyTensor(acl_src)); + ACL_CHECK(aclDestroyTensor(acl_dst)); + } + ACL_CHECK(aclDestroyIntArray(paddings)); +} + +void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + ggml_tensor * src0 = dst->src[0]; + ggml_tensor * src1 = dst->src[1]; + + aclTensor* acl_self = ggml_cann_create_tensor(src0); + aclTensor* acl_other = ggml_cann_create_tensor(src1); + + GGML_CANN_CALL_ACLNN_OP(InplaceEqTensor, acl_self, acl_other); + + ggml_cann_sum(ctx, dst); + + ACL_CHECK(aclDestroyTensor(acl_self)); + ACL_CHECK(aclDestroyTensor(acl_other)); +} + +void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + ggml_tensor * src0 = dst->src[0]; + + aclTensor* acl_src = ggml_cann_create_tensor(src0); + aclTensor* acl_dst = ggml_cann_create_tensor(dst); + + float alphaValue = 0.0f; 
+ aclScalar* alpha = nullptr; + alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); + + GGML_CANN_CALL_ACLNN_OP(GtScalar, acl_src, alpha, acl_dst); + + ACL_CHECK(aclDestroyTensor(acl_src)); + ACL_CHECK(aclDestroyTensor(acl_dst)); + ACL_CHECK(aclDestroyScalar(alpha)); } diff --git a/ggml/src/ggml-cann/aclnn_ops.h b/ggml/src/ggml-cann/aclnn_ops.h index aadf013de50c2..b2d1b3c36d238 100644 --- a/ggml/src/ggml-cann/aclnn_ops.h +++ b/ggml/src/ggml-cann/aclnn_ops.h @@ -42,6 +42,8 @@ #include <aclnnop/aclnn_sqrt.h> #include <aclnnop/aclnn_sin.h> #include <aclnnop/aclnn_cos.h> +#include <aclnnop/aclnn_log.h> +#include <aclnnop/aclnn_sign.h> #include "acl_tensor.h" #include "common.h" @@ -650,6 +652,67 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds */ void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst); +/** + * @brief Computes the mean of a ggml tensor element-wise using the CANN backend. + * + * @details This function calculates the element-wise mean of the input tensor. + * The result is written to the destination tensor `dst`. + * The mean is computed by averaging the values across the entire tensor. + * + * This operation is optimized using the CANN backend for high-performance inference or training. + * + * @param ctx The CANN context used for operations. + * @param dst The destination tensor where the mean result will be stored. + * dst->op is expected to be `GGML_OP_MEAN`. + */ +void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst); + +/** + * @brief Applies 1D reflect padding to a ggml tensor using the CANN backend. + * + * @details This function performs 1D reflect padding on the input tensor. + * The amount of padding on each side is specified by parameters stored in `dst->op_params`. + * The operation reflects the values at the borders of the tensor to generate the padded output. + * + * This operation is optimized using the CANN backend for high-performance inference or training. 
+ * + * @param ctx The CANN context used for operations. + * @param dst The destination tensor where the padded result will be stored. + * dst->op is expected to be `GGML_OP_PAD_REFLECT_1D`. + */ +void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst); + +/** + * @brief Counts the number of equal elements in two ggml tensors using the CANN backend. + * + * @details This function performs an element-wise comparison between two input tensors, + * and counts the number of positions where the elements are equal. The result is + * stored in the destination tensor `dst` as a scalar. + * + * The operation is optimized using the CANN backend, making it suitable for + * high-performance inference or training scenarios. + * + * @param ctx The CANN context used for operations. + * @param dst The destination tensor where the result will be stored. + * dst->op is expected to be `GGML_OP_COUNT_EQUAL`. + */ +void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst); + +/** + * @brief Applies the Step activation function to a ggml tensor using the CANN backend. + * + * @details This function applies a step function element-wise to the input tensor, where + * each element is transformed to 1.0 if it is greater than 0, and 0.0 otherwise. + * The result is stored in the destination tensor `dst`. + * + * This operation is accelerated using the CANN backend to improve runtime performance. + * + * @param ctx The CANN context used for operations. + * @param dst The destination tensor where the result will be stored. + * dst->op is expected to be `GGML_OP_STEP`. + */ +void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst); + /** * @brief Applies a element-wise operation to two input tensors using the CANN * backend. 
diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index f9187ba819496..b513270c6e5ac 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -1358,6 +1358,12 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, case GGML_UNARY_OP_ELU: ggml_cann_elu(ctx, dst); break; + case GGML_UNARY_OP_SGN: + GGML_CANN_CALL_UNARY_OP(Sign); + break; + case GGML_UNARY_OP_STEP: + ggml_cann_step(ctx, dst); + break; default: return false; } @@ -1456,6 +1462,18 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, case GGML_OP_CONV_TRANSPOSE_1D: ggml_cann_conv_transpose_1d(ctx, dst); break; + case GGML_OP_LOG: + GGML_CANN_CALL_UNARY_OP(Log); + break; + case GGML_OP_MEAN: + ggml_cann_mean(ctx, dst); + break; + case GGML_OP_PAD_REFLECT_1D: + ggml_cann_pad_reflect_1d(ctx, dst); + break; + case GGML_OP_COUNT_EQUAL: + ggml_cann_count_equal(ctx, dst); + break; default: return false; } @@ -1718,6 +1736,8 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_EXP: case GGML_UNARY_OP_ELU: + case GGML_UNARY_OP_SGN: + case GGML_UNARY_OP_STEP: return true; default: return false; @@ -1851,6 +1871,10 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, case GGML_OP_COS: case GGML_OP_SIN: case GGML_OP_CONV_TRANSPOSE_1D: + case GGML_OP_LOG: + case GGML_OP_MEAN: + case GGML_OP_PAD_REFLECT_1D: + case GGML_OP_COUNT_EQUAL: return true; default: return false;
diff --git a/ggml/src/ggml-cann/acl_tensor.cpp b/ggml/src/ggml-cann/acl_tensor.cpp index 9b6553c500129..f5462c5a18e37 100644 --- a/ggml/src/ggml-cann/acl_tensor.cpp +++ b/ggml/src/ggml-cann/acl_tensor.cpp @@ -41,6 +41,8 @@ aclDataType ggml_cann_type_mapping(ggml_type type) { return ACL_INT4; case GGML_TYPE_Q8_0: return ACL_INT8; + return ACL_INT64; return ACL_DT_UNDEFINED; diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index 25b2599c7bf6a..37d4117972358 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -59,6 +59,11 @@ #include <aclnnop/aclnn_div.h> #include <aclnnop/aclnn_convolution.h> #include <aclnnop/aclnn_elu.h> +#include <aclnnop/aclnn_reflection_pad1d.h> +#include <aclnnop/aclnn_eq_tensor.h> +#include <aclnnop/aclnn_gt_scalar.h> #include <float.h> #include <cmath> @@ -2598,6 +2603,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { aclTensor* acl_dst = ggml_cann_create_tensor(dst, dst->ne, dst->nb, 3); GGML_CANN_CALL_ACLNN_OP(ArgMax, acl_src, 3, false, acl_dst); ACL_CHECK(aclDestroyTensor(acl_src)); @@ -2629,6 +2635,9 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds ACL_CHECK(aclDestroyTensor(acl_weight)); + ACL_CHECK(aclDestroyIntArray(stride)); + ACL_CHECK(aclDestroyIntArray(padding)); + ACL_CHECK(aclDestroyIntArray(dilation)); void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){ @@ -2646,4 +2655,79 @@ void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst){ ACL_CHECK(aclDestroyTensor(acl_input)); +void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + int64_t reduceDimValue[] = {3}; + aclIntArray* reduceDim = aclCreateIntArray(reduceDimValue, 1); + bool keepDim = true; + GGML_CANN_CALL_ACLNN_OP(Mean, acl_src, reduceDim, keepDim, ACL_FLOAT, acl_dst); + ACL_CHECK(aclDestroyIntArray(reduceDim)); +void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + 
int32_t *opts = (int32_t *) dst->op_params; + int64_t paddingsArray[2] = {opts[0], opts[1]}; + aclIntArray* paddings = aclCreateIntArray(paddingsArray, 2); + for (int64_t i = 0; i < src0->ne[3]; i++) { + aclTensor* acl_src = ggml_cann_create_tensor( + (char*)src0->data + i * src0->ne[3], + ggml_cann_type_mapping(src0->type), ggml_element_size(src0), + aclTensor* acl_dst = ggml_cann_create_tensor( + (char*)dst->data + i * src0->ne[3], + ggml_cann_type_mapping(dst->type), ggml_element_size(dst), + dst->ne, dst->nb, 3); + GGML_CANN_CALL_ACLNN_OP(ReflectionPad1d, acl_src, paddings, acl_dst); + ACL_CHECK(aclDestroyTensor(acl_src)); + } + ACL_CHECK(aclDestroyIntArray(paddings)); +void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + ggml_tensor * src1 = dst->src[1]; + aclTensor* acl_self = ggml_cann_create_tensor(src0); + aclTensor* acl_other = ggml_cann_create_tensor(src1); + GGML_CANN_CALL_ACLNN_OP(InplaceEqTensor, acl_self, acl_other); + ACL_CHECK(aclDestroyTensor(acl_self)); + ACL_CHECK(aclDestroyTensor(acl_other)); +void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst){ + float alphaValue = 0.0f; + aclScalar* alpha = nullptr; + alpha = aclCreateScalar(&alphaValue, aclDataType::ACL_FLOAT); diff --git a/ggml/src/ggml-cann/aclnn_ops.h b/ggml/src/ggml-cann/aclnn_ops.h index aadf013de50c2..b2d1b3c36d238 100644 --- a/ggml/src/ggml-cann/aclnn_ops.h +++ b/ggml/src/ggml-cann/aclnn_ops.h @@ -42,6 +42,8 @@ #include <aclnnop/aclnn_sqrt.h> #include <aclnnop/aclnn_sin.h> #include <aclnnop/aclnn_cos.h> +#include <aclnnop/aclnn_sign.h> #include "acl_tensor.h" #include "common.h" @@ -650,6 +652,67 @@ void ggml_cann_conv_transpose_1d(ggml_backend_cann_context& ctx, ggml_tensor* ds */ void ggml_cann_elu(ggml_backend_cann_context& ctx, ggml_tensor* dst); + * @details This function calculates the element-wise mean of the input tensor. + * The mean is computed by averaging the values across the entire tensor. 
+void ggml_cann_mean(ggml_backend_cann_context& ctx, ggml_tensor* dst); + * @brief Applies 1D reflect padding to a ggml tensor using the CANN backend. + * @details This function performs 1D reflect padding on the input tensor. + * The amount of padding on each side is specified by parameters stored in `dst->op_params`. + * @param dst The destination tensor where the padded result will be stored. + * dst->op is expected to be `GGML_OP_PAD_REFLECT_1D`. +void ggml_cann_pad_reflect_1d(ggml_backend_cann_context& ctx, ggml_tensor* dst); + * @brief Counts the number of equal elements in two ggml tensors using the CANN backend. + * and counts the number of positions where the elements are equal. The result is + * stored in the destination tensor `dst` as a scalar. + * The operation is optimized using the CANN backend, making it suitable for + * high-performance inference or training scenarios. + * dst->op is expected to be `GGML_OP_COUNT_EQUAL`. +void ggml_cann_count_equal(ggml_backend_cann_context& ctx, ggml_tensor* dst); + * @brief Applies the Step activation function to a ggml tensor using the CANN backend. + * @details This function applies a step function element-wise to the input tensor, where + * each element is transformed to 1.0 if it is greater than 0, and 0.0 otherwise. + * The result is stored in the destination tensor `dst`. + * This operation is accelerated using the CANN backend to improve runtime performance. + * dst->op is expected to be `GGML_OP_STEP`. /** * @brief Applies a element-wise operation to two input tensors using the CANN * backend. 
diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index f9187ba819496..b513270c6e5ac 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -1358,6 +1358,12 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, ggml_cann_elu(ctx, dst); break; + GGML_CANN_CALL_UNARY_OP(Sign); + ggml_cann_step(ctx, dst); } @@ -1456,6 +1462,18 @@ static bool ggml_cann_compute_forward(ggml_backend_cann_context& ctx, ggml_cann_conv_transpose_1d(ctx, dst); break; + ggml_cann_mean(ctx, dst); + ggml_cann_pad_reflect_1d(ctx, dst); + ggml_cann_count_equal(ctx, dst); @@ -1718,6 +1736,8 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_EXP: return true; @@ -1851,6 +1871,10 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, case GGML_OP_COS: case GGML_OP_SIN: return true;
[ "+ case GGML_TYPE_I64:", "+#include <aclnnop/aclnn_mean.h>", "+ src0->ne, src0->nb, 3);", "+ ACL_CHECK(aclDestroyTensor(acl_dst));", "+ ggml_cann_sum(ctx, dst);", "+ GGML_CANN_CALL_ACLNN_OP(GtScalar, acl_src, alpha, acl_dst);", "+ * @brief Computes the mean of a ggml tensor element-wise using the CANN backend.", "+ * The result is written to the destination tensor `dst`.", "+ * @param dst The destination tensor where the mean result will be stored.", "+ * dst->op is expected to be `GGML_OP_MEAN`.", "+ * The operation reflects the values at the borders of the tensor to generate the padded output.", "+ * @details This function performs an element-wise comparison between two input tensors,", "+void ggml_cann_step(ggml_backend_cann_context& ctx, ggml_tensor* dst);", "+ GGML_CANN_CALL_UNARY_OP(Log);" ]
[ 8, 22, 81, 91, 105, 121, 145, 148, 154, 155, 164, 177, 203, 230 ]
{ "additions": 173, "author": "noemotiovon", "deletions": 0, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12841", "issue_id": 12841, "merged_at": "2025-04-10T00:51:52Z", "omission_probability": 0.1, "pr_number": 12841, "repo": "ggml-org/llama.cpp", "title": "[CANN]Support Opt LOG && MEAN && PAD_REFLECT_1D && STEP ...", "total_changes": 173 }
833
diff --git a/.devops/cuda.Dockerfile b/.devops/cuda.Dockerfile index a196111e61d62..8ae57d2e289f4 100644 --- a/.devops/cuda.Dockerfile +++ b/.devops/cuda.Dockerfile @@ -21,7 +21,7 @@ COPY . . RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ cmake --build build --config Release -j$(nproc) RUN mkdir -p /app/lib && \ diff --git a/.devops/intel.Dockerfile b/.devops/intel.Dockerfile index e2b381766f196..091e1dc5d8b2c 100644 --- a/.devops/intel.Dockerfile +++ b/.devops/intel.Dockerfile @@ -17,7 +17,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \ && export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \ fi && \ echo "Building with dynamic libs" && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${OPT_SYCL_F16} && \ cmake --build build --config Release -j$(nproc) RUN mkdir -p /app/lib && \ diff --git a/.devops/musa.Dockerfile b/.devops/musa.Dockerfile index e8297c6948c5c..261a2823a0e52 100644 --- a/.devops/musa.Dockerfile +++ b/.devops/musa.Dockerfile @@ -35,7 +35,7 @@ COPY . . RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ fi && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . 
&& \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ cmake --build build --config Release -j$(nproc) RUN mkdir -p /app/lib && \ diff --git a/.devops/rocm.Dockerfile b/.devops/rocm.Dockerfile index 66687a25ba068..a1b34723a46af 100644 --- a/.devops/rocm.Dockerfile +++ b/.devops/rocm.Dockerfile @@ -17,8 +17,8 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build # gfx906 is deprecated #check https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.2.4/reference/system-requirements.html -#ARG ROCM_DOCKER_ARCH='gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102' -ARG ROCM_DOCKER_ARCH=gfx1100 +ARG ROCM_DOCKER_ARCH='gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102' +#ARG ROCM_DOCKER_ARCH=gfx1100 # Set nvcc architectured ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH} @@ -40,7 +40,7 @@ WORKDIR /app COPY . . RUN HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \ - cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DCMAKE_BUILD_TYPE=Release \ + cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON \ && cmake --build build --config Release -j$(nproc) RUN mkdir -p /app/lib \ diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile index 9064f383858fa..f8f3072e95768 100644 --- a/.devops/vulkan.Dockerfile +++ b/.devops/vulkan.Dockerfile @@ -16,7 +16,7 @@ WORKDIR /app COPY . . -RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \ +RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_CURL=1 -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON && \ cmake --build build --config Release -j$(nproc) RUN mkdir -p /app/lib && \
diff --git a/.devops/cuda.Dockerfile b/.devops/cuda.Dockerfile index a196111e61d62..8ae57d2e289f4 100644 --- a/.devops/cuda.Dockerfile +++ b/.devops/cuda.Dockerfile @@ -21,7 +21,7 @@ COPY . . RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \ export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ diff --git a/.devops/intel.Dockerfile b/.devops/intel.Dockerfile index e2b381766f196..091e1dc5d8b2c 100644 --- a/.devops/intel.Dockerfile +++ b/.devops/intel.Dockerfile @@ -17,7 +17,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \ && export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \ echo "Building with dynamic libs" && \ - cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${OPT_SYCL_F16} && \ diff --git a/.devops/musa.Dockerfile b/.devops/musa.Dockerfile index e8297c6948c5c..261a2823a0e52 100644 --- a/.devops/musa.Dockerfile +++ b/.devops/musa.Dockerfile @@ -35,7 +35,7 @@ COPY . . RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . 
&& \ diff --git a/.devops/rocm.Dockerfile b/.devops/rocm.Dockerfile index 66687a25ba068..a1b34723a46af 100644 --- a/.devops/rocm.Dockerfile +++ b/.devops/rocm.Dockerfile @@ -17,8 +17,8 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build # gfx906 is deprecated #check https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.2.4/reference/system-requirements.html -#ARG ROCM_DOCKER_ARCH='gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102' -ARG ROCM_DOCKER_ARCH=gfx1100 +ARG ROCM_DOCKER_ARCH='gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102' # Set nvcc architectured ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH} @@ -40,7 +40,7 @@ WORKDIR /app RUN HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \ - cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DCMAKE_BUILD_TYPE=Release \ + cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON \ && cmake --build build --config Release -j$(nproc) RUN mkdir -p /app/lib \ diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile index 9064f383858fa..f8f3072e95768 100644 --- a/.devops/vulkan.Dockerfile +++ b/.devops/vulkan.Dockerfile @@ -16,7 +16,7 @@ WORKDIR /app -RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_CURL=1 && \ +RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 -DLLAMA_CURL=1 -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON && \
[ "- cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \\", "+#ARG ROCM_DOCKER_ARCH=gfx1100" ]
[ 34, 50 ]
{ "additions": 7, "author": "rudiservo", "deletions": 7, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12749", "issue_id": 12749, "merged_at": "2025-04-09T23:17:12Z", "omission_probability": 0.1, "pr_number": 12749, "repo": "ggml-org/llama.cpp", "title": "Added all CPU to Docker GPU images for 'token_embd.weight' compatibility", "total_changes": 14 }
834
diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 760c3646433ad..1bf1ee876b40f 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1705,6 +1705,8 @@ struct server_queue { }; struct server_response { + bool running = true; + // for keeping track of all tasks waiting for the result std::unordered_set<int> waiting_task_ids; @@ -1759,6 +1761,10 @@ struct server_response { while (true) { std::unique_lock<std::mutex> lock(mutex_results); condition_results.wait(lock, [&]{ + if (!running) { + SRV_DBG("%s : queue result stop\n", __func__); + std::terminate(); // we cannot return here since the caller is HTTP code + } return !queue_results.empty(); }); @@ -1789,6 +1795,10 @@ struct server_response { } std::cv_status cr_res = condition_results.wait_for(lock, std::chrono::seconds(timeout)); + if (!running) { + SRV_DBG("%s : queue result stop\n", __func__); + std::terminate(); // we cannot return here since the caller is HTTP code + } if (cr_res == std::cv_status::timeout) { return nullptr; } @@ -1818,6 +1828,12 @@ struct server_response { } } } + + // terminate the waiting loop + void terminate() { + running = false; + condition_results.notify_all(); + } }; struct server_context { @@ -4491,9 +4507,10 @@ int main(int argc, char ** argv) { svr->new_task_queue = [&params] { return new httplib::ThreadPool(params.n_threads_http); }; // clean up function, to be called before exit - auto clean_up = [&svr]() { + auto clean_up = [&svr, &ctx_server]() { SRV_INF("%s: cleaning up before exit...\n", __func__); svr->stop(); + ctx_server.queue_results.terminate(); llama_backend_free(); }; @@ -4534,7 +4551,7 @@ int main(int argc, char ** argv) { if (!ctx_server.load_model(params)) { clean_up(); - // t.join(); // FIXME: see below + t.join(); LOG_ERR("%s: exiting due to model loading error\n", __func__); return 1; } @@ -4582,7 +4599,7 @@ int main(int argc, char ** argv) { ctx_server.queue_tasks.start_loop(); clean_up(); - // t.join(); // FIXME: 
http thread may stuck if there is an on-going request. we don't need to care about this for now as the HTTP connection will already be closed at this point, but it's better to fix this + t.join(); return 0; }
diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 760c3646433ad..1bf1ee876b40f 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1705,6 +1705,8 @@ struct server_queue { struct server_response { + bool running = true; // for keeping track of all tasks waiting for the result std::unordered_set<int> waiting_task_ids; @@ -1759,6 +1761,10 @@ struct server_response { while (true) { std::unique_lock<std::mutex> lock(mutex_results); condition_results.wait(lock, [&]{ + SRV_DBG("%s : queue result stop\n", __func__); + std::terminate(); // we cannot return here since the caller is HTTP code + } return !queue_results.empty(); }); @@ -1789,6 +1795,10 @@ struct server_response { std::cv_status cr_res = condition_results.wait_for(lock, std::chrono::seconds(timeout)); + if (!running) { + std::terminate(); // we cannot return here since the caller is HTTP code + } if (cr_res == std::cv_status::timeout) { return nullptr; @@ -1818,6 +1828,12 @@ struct server_response { } + // terminate the waiting loop + void terminate() { + running = false; + condition_results.notify_all(); + } struct server_context { @@ -4491,9 +4507,10 @@ int main(int argc, char ** argv) { svr->new_task_queue = [&params] { return new httplib::ThreadPool(params.n_threads_http); }; // clean up function, to be called before exit - auto clean_up = [&svr]() { + auto clean_up = [&svr, &ctx_server]() { SRV_INF("%s: cleaning up before exit...\n", __func__); svr->stop(); + ctx_server.queue_results.terminate(); llama_backend_free(); }; @@ -4534,7 +4551,7 @@ int main(int argc, char ** argv) { if (!ctx_server.load_model(params)) { clean_up(); - // t.join(); // FIXME: see below + t.join(); LOG_ERR("%s: exiting due to model loading error\n", __func__); return 1; @@ -4582,7 +4599,7 @@ int main(int argc, char ** argv) { ctx_server.queue_tasks.start_loop(); clean_up(); - // t.join(); // FIXME: http thread may stuck if there is an on-going request. 
we don't need to care about this for now as the HTTP connection will already be closed at this point, but it's better to fix this + t.join(); return 0; }
[ "+ if (!running) {", "+ SRV_DBG(\"%s : queue result stop\\n\", __func__);" ]
[ 17, 29 ]
{ "additions": 20, "author": "ngxson", "deletions": 3, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12831", "issue_id": 12831, "merged_at": "2025-04-08T16:37:06Z", "omission_probability": 0.1, "pr_number": 12831, "repo": "ggml-org/llama.cpp", "title": "server : fix thread.join() on exit", "total_changes": 23 }
835
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 55cf3230d90ce..aba2f27f9b564 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -3,7 +3,7 @@ #include "common.h" #include "log.h" #include "llama.h" -#include "common/base64.hpp" +#include "base64.hpp" // increase max payload length to allow use of larger context size #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b340dae5b28cd..9f7ab13f1e620 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -32,7 +32,7 @@ add_library(llama unicode.h ) -target_include_directories(llama PUBLIC . ../include ../common) +target_include_directories(llama PUBLIC . ../include) target_compile_features (llama PUBLIC cxx_std_17) # don't bump target_link_libraries(llama PUBLIC ggml) diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index a9627df684922..be1a640068dc7 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -19,6 +19,8 @@ static std::string normalize_newlines(const std::string & s) { #endif } +#define U8C(x) (const char*)(u8##x) + static common_chat_msg simple_msg(const std::string & role, const std::string & content) { common_chat_msg msg; msg.role = role; @@ -35,6 +37,8 @@ int main(void) { {"assistant", " I am an assistant "}, {"user", "Another question"}, }; + + // std::string wrong = /* .template_str= */ u8"[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n......{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}"; 
struct TestCase { std::string name; std::string template_str; @@ -177,7 +181,7 @@ int main(void) { }, { /* .name= */ "ChatGLM4", - /* .template_str= */ u8"[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n......{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}", + /* .template_str= */ U8C("[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n......{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}"), /* .expected_output= */ "[gMASK]<sop><|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>", /* .expected_output_jinja= */ "", /* .bos_token= */ "", @@ -193,8 +197,8 @@ int main(void) { }, { /* .name= */ "MiniCPM-3B-OpenHermes-2.5-v2-GGUF", - /* .template_str= */ u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}", - /* .expected_output= */ u8"You are a helpful assistant<用户>Hello<AI>Hi there<用户>Who are you<AI>I am an assistant<用户>Another question<AI>", + /* 
.template_str= */ U8C("{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}"), + /* .expected_output= */ U8C("You are a helpful assistant<用户>Hello<AI>Hi there<用户>Who are you<AI>I am an assistant<用户>Another question<AI>"), /* .expected_output_jinja= */ "", /* .bos_token= */ "", /* .eos_token= */ "", @@ -202,7 +206,7 @@ int main(void) { { /* .name= */ "DeepSeek-V2", /* .template_str= */ "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}", - /* .expected_output= */ u8"You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:", + /* .expected_output= */ U8C("You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:"), /* .expected_output_jinja= */ "", /* .bos_token= */ "", /* .eos_token= */ "<|end▁of▁sentence|>", @@ -256,7 +260,7 @@ int main(void) { }, { /* .name= */ "Infinigence/Megrez-3B-Instruct", - /* .template_str= */ u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' 
}}{% endif %}", + /* .template_str= */ U8C("{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}"), /* .expected_output= */ "<|role_start|>system<|role_end|>You are a helpful assistant<|turn_end|><|role_start|>user<|role_end|>Hello<|turn_end|><|role_start|>assistant<|role_end|>Hi there<|turn_end|><|role_start|>user<|role_end|>Who are you<|turn_end|><|role_start|>assistant<|role_end|> I am an assistant <|turn_end|><|role_start|>user<|role_end|>Another question<|turn_end|><|role_start|>assistant<|role_end|>", /* .expected_output_jinja= */ "", /* .bos_token= */ "",
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 55cf3230d90ce..aba2f27f9b564 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -3,7 +3,7 @@ #include "common.h" #include "log.h" #include "llama.h" -#include "common/base64.hpp" +#include "base64.hpp" // increase max payload length to allow use of larger context size #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b340dae5b28cd..9f7ab13f1e620 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -32,7 +32,7 @@ add_library(llama unicode.h ) -target_include_directories(llama PUBLIC . ../include ../common) target_compile_features (llama PUBLIC cxx_std_17) # don't bump target_link_libraries(llama PUBLIC ggml) diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index a9627df684922..be1a640068dc7 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -19,6 +19,8 @@ static std::string normalize_newlines(const std::string & s) { #endif } +#define U8C(x) (const char*)(u8##x) static common_chat_msg simple_msg(const std::string & role, const std::string & content) { common_chat_msg msg; msg.role = role; @@ -35,6 +37,8 @@ int main(void) { {"assistant", " I am an assistant "}, {"user", "Another question"}, }; + // std::string wrong = /* .template_str= */ u8"[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n......{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}"; struct TestCase { std::string name; std::string 
template_str; @@ -177,7 +181,7 @@ int main(void) { /* .name= */ "ChatGLM4", - /* .template_str= */ u8"[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n......{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}", + /* .template_str= */ U8C("[gMASK]<sop>{% for item in messages %}{% if item['tools'] is defined %}<|system|>\n你是一个名为 ChatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{% set tools = item['tools'] %}{% for tool in tools %}{% if tool['type'] == 'function' %}\n\n## {{ tool['function']['name'] }}\n\n{{ tool['function'] | tojson(indent=4) }}\n......{% endif %}{% endfor %}{% endif %}{% if item['content'] %}<|{{ item['role'] }}|>{{ item['metadata'] }}\n{{ item['content'] }}{% endif %}{% endfor %}{% if add_generation_prompt %}<|assistant|>{% endif %}"), /* .expected_output= */ "[gMASK]<sop><|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>", @@ -193,8 +197,8 @@ int main(void) { /* .name= */ "MiniCPM-3B-OpenHermes-2.5-v2-GGUF", - /* .template_str= */ u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}", - /* .expected_output= */ u8"You are a helpful assistant<用户>Hello<AI>Hi there<用户>Who are you<AI>I am an assistant<用户>Another question<AI>", + /* .template_str= */ U8C("{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + 
message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}"), + /* .expected_output= */ U8C("You are a helpful assistant<用户>Hello<AI>Hi there<用户>Who are you<AI>I am an assistant<用户>Another question<AI>"), /* .eos_token= */ "", @@ -202,7 +206,7 @@ int main(void) { /* .name= */ "DeepSeek-V2", /* .template_str= */ "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}", - /* .expected_output= */ u8"You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:", + /* .expected_output= */ U8C("You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:"), /* .eos_token= */ "<|end▁of▁sentence|>", @@ -256,7 +260,7 @@ int main(void) { /* .name= */ "Infinigence/Megrez-3B-Instruct", - /* .template_str= */ u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}", /* .expected_output= */ "<|role_start|>system<|role_end|>You are a helpful assistant<|turn_end|><|role_start|>user<|role_end|>Hello<|turn_end|><|role_start|>assistant<|role_end|>Hi 
there<|turn_end|><|role_start|>user<|role_end|>Who are you<|turn_end|><|role_start|>assistant<|role_end|> I am an assistant <|turn_end|><|role_start|>user<|role_end|>Another question<|turn_end|><|role_start|>assistant<|role_end|>",
[ "+target_include_directories(llama PUBLIC . ../include)", "+ /* .template_str= */ U8C(\"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}\")," ]
[ 22, 82 ]
{ "additions": 11, "author": "pminev", "deletions": 7, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12827", "issue_id": 12827, "merged_at": "2025-04-09T08:11:11Z", "omission_probability": 0.1, "pr_number": 12827, "repo": "ggml-org/llama.cpp", "title": "ci: detach common from the library", "total_changes": 18 }
836
diff --git a/ggml/src/ggml-cuda/cpy.cu b/ggml/src/ggml-cuda/cpy.cu index ed853ee6c15a2..4f4faa3e63ae7 100644 --- a/ggml/src/ggml-cuda/cpy.cu +++ b/ggml/src/ggml-cuda/cpy.cu @@ -10,6 +10,13 @@ static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) { *dsti = *xi; } +static __device__ void cpy_1_f32_bf16(const char * cxi, char * cdsti) { + const float * xi = (const float *) cxi; + nv_bfloat16 * dsti = (nv_bfloat16 *) cdsti; + + *dsti = *xi; +} + static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; half * dsti = (half *) cdsti; @@ -386,6 +393,16 @@ static void ggml_cpy_f32_f32_cuda( (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); } +static void ggml_cpy_f32_bf16_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { + + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_f32_f16<cpy_1_f32_bf16><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); +} + static void ggml_cpy_f32_f16_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, @@ -581,6 +598,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg CUDA_CHECK(cudaMemcpyAsync(src1_ddc, src0_ddc, ggml_nbytes(src0), cudaMemcpyDeviceToDevice, main_stream)); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { ggml_cpy_f32_f32_cuda (src0_ddc, src1_ddc, ne, ne00, 
ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_BF16) { + ggml_cpy_f32_bf16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { ggml_cpy_f32_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { @@ -634,6 +653,8 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { return nullptr; } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { return (void*) cpy_f32_f16<cpy_1_f32_f32>; + } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_BF16) { + return (void*) cpy_f32_f16<cpy_1_f32_bf16>; } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { return (void*) cpy_f32_f16<cpy_1_f32_f16>; } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 78717df1a6095..633456a92d0de 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -3079,6 +3079,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) { return true; } + if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_BF16) { + return true; + } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) { return true; }
diff --git a/ggml/src/ggml-cuda/cpy.cu b/ggml/src/ggml-cuda/cpy.cu index ed853ee6c15a2..4f4faa3e63ae7 100644 --- a/ggml/src/ggml-cuda/cpy.cu +++ b/ggml/src/ggml-cuda/cpy.cu @@ -10,6 +10,13 @@ static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) { *dsti = *xi; +static __device__ void cpy_1_f32_bf16(const char * cxi, char * cdsti) { + nv_bfloat16 * dsti = (nv_bfloat16 *) cdsti; + *dsti = *xi; static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) { const float * xi = (const float *) cxi; half * dsti = (half *) cdsti; @@ -386,6 +393,16 @@ static void ggml_cpy_f32_f32_cuda( (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); +static void ggml_cpy_f32_bf16_cuda( + const char * cx, char * cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, + const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_f32_f16<cpy_1_f32_bf16><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); static void ggml_cpy_f32_f16_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, @@ -581,6 +598,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg CUDA_CHECK(cudaMemcpyAsync(src1_ddc, src0_ddc, ggml_nbytes(src0), cudaMemcpyDeviceToDevice, main_stream)); ggml_cpy_f32_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + 
ggml_cpy_f32_bf16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); ggml_cpy_f32_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); @@ -634,6 +653,8 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { return nullptr; return (void*) cpy_f32_f16<cpy_1_f32_f32>; + return (void*) cpy_f32_f16<cpy_1_f32_bf16>; return (void*) cpy_f32_f16<cpy_1_f32_f16>; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 78717df1a6095..633456a92d0de 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -3079,6 +3079,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) { + if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_BF16) { + return true; + } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
[ "+ const float * xi = (const float *) cxi;" ]
[ 9 ]
{ "additions": 24, "author": "CISC", "deletions": 0, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12806", "issue_id": 12806, "merged_at": "2025-04-08T21:21:32Z", "omission_probability": 0.1, "pr_number": 12806, "repo": "ggml-org/llama.cpp", "title": "cuda : add f32 to bf16 copy op", "total_changes": 24 }
837
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index e9520f3d1a378..44428cc959553 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -380,6 +380,7 @@ struct clip_ctx { if (backend_cpu != backend) { ggml_backend_free(backend_cpu); } + clip_image_size_free(load_image_size); } }; @@ -1618,6 +1619,12 @@ struct clip_image_f32 * clip_image_f32_init() { return new clip_image_f32(); } +void clip_image_size_free(struct clip_image_size * load_image_size) { + if (load_image_size == nullptr) { + return; + } + delete load_image_size; +} void clip_image_u8_free(struct clip_image_u8 * img) { delete img; } void clip_image_f32_free(struct clip_image_f32 * img) { delete img; } void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { @@ -2270,6 +2277,9 @@ ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) { } void clip_free(clip_ctx * ctx) { + if (ctx == nullptr) { + return; + } delete ctx; } diff --git a/examples/llava/clip.h b/examples/llava/clip.h index d806465bf68bb..87aa61574b1eb 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -77,6 +77,7 @@ CLIP_API struct clip_image_size * clip_image_size_init(); CLIP_API struct clip_image_u8 * clip_image_u8_init (); CLIP_API struct clip_image_f32 * clip_image_f32_init(); +CLIP_API void clip_image_size_free (struct clip_image_size * img_size); CLIP_API void clip_image_u8_free (struct clip_image_u8 * img); CLIP_API void clip_image_f32_free(struct clip_image_f32 * img); CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch * batch);
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index e9520f3d1a378..44428cc959553 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -380,6 +380,7 @@ struct clip_ctx { if (backend_cpu != backend) { ggml_backend_free(backend_cpu); } + clip_image_size_free(load_image_size); } }; @@ -1618,6 +1619,12 @@ struct clip_image_f32 * clip_image_f32_init() { return new clip_image_f32(); +void clip_image_size_free(struct clip_image_size * load_image_size) { + delete load_image_size; +} void clip_image_u8_free(struct clip_image_u8 * img) { delete img; } void clip_image_f32_free(struct clip_image_f32 * img) { delete img; } void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { @@ -2270,6 +2277,9 @@ ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) { void clip_free(clip_ctx * ctx) { + if (ctx == nullptr) { delete ctx; diff --git a/examples/llava/clip.h b/examples/llava/clip.h index d806465bf68bb..87aa61574b1eb 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -77,6 +77,7 @@ CLIP_API struct clip_image_size * clip_image_size_init(); CLIP_API struct clip_image_u8 * clip_image_u8_init (); CLIP_API struct clip_image_f32 * clip_image_f32_init(); +CLIP_API void clip_image_size_free (struct clip_image_size * img_size); CLIP_API void clip_image_u8_free (struct clip_image_u8 * img); CLIP_API void clip_image_f32_free(struct clip_image_f32 * img); CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch * batch);
[ "+ if (load_image_size == nullptr) {" ]
[ 17 ]
{ "additions": 11, "author": "mattjcly", "deletions": 0, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12834", "issue_id": 12834, "merged_at": "2025-04-08T20:01:58Z", "omission_probability": 0.1, "pr_number": 12834, "repo": "ggml-org/llama.cpp", "title": "llava: improve clip_ctx destructor to not memleak load_image_size", "total_changes": 11 }
838
diff --git a/examples/server/tests/unit/test_embedding.py b/examples/server/tests/unit/test_embedding.py index 8b0eb42b0926f..0feb452ccfcd4 100644 --- a/examples/server/tests/unit/test_embedding.py +++ b/examples/server/tests/unit/test_embedding.py @@ -49,6 +49,26 @@ def test_embedding_multiple(): assert len(d['embedding']) > 1 +def test_embedding_multiple_with_fa(): + server = ServerPreset.bert_bge_small_with_fa() + server.pooling = 'last' + server.start() + # one of these should trigger the FA branch (i.e. context size % 256 == 0) + res = server.make_request("POST", "/v1/embeddings", data={ + "input": [ + "a "*253, + "b "*254, + "c "*255, + "d "*256, + ], + }) + assert res.status_code == 200 + assert len(res.body['data']) == 4 + for d in res.body['data']: + assert 'embedding' in d + assert len(d['embedding']) > 1 + + @pytest.mark.parametrize( "input,is_multi_prompt", [ diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 30aa8660950a1..4dc2062a8e5b9 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -323,6 +323,21 @@ def bert_bge_small() -> ServerProcess: server.server_embeddings = True return server + @staticmethod + def bert_bge_small_with_fa() -> ServerProcess: + server = ServerProcess() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf" + server.model_alias = "bert-bge-small" + server.n_ctx = 1024 + server.n_batch = 300 + server.n_ubatch = 300 + server.n_slots = 2 + server.fa = True + server.seed = 42 + server.server_embeddings = True + return server + @staticmethod def tinyllama_infill() -> ServerProcess: server = ServerProcess() diff --git a/examples/server_embd.py b/examples/server_embd.py index 0e34c6ceab9ca..f8b0ffecd8f47 100644 --- a/examples/server_embd.py +++ b/examples/server_embd.py @@ -15,7 +15,7 @@ async def main(): model_url = "http://127.0.0.1:6900" responses: list[requests.Response] = await asyncio.gather(*[requests_post_async( 
url= f"{model_url}/embedding", - json= {"content": str(0)*1024} + json= {"content": "a "*1022} ) for i in range(n)]) for response in responses: diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 7a8d5ac6fd9d0..f63656be54f5c 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -6721,8 +6721,8 @@ static void ggml_compute_forward_flash_attn_ext_f16( ggml_vec_dot_t const kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; ggml_to_float_t const v_to_float = ggml_get_type_traits(v->type)->to_float; - GGML_ASSERT(q_to_vec_dot && "fattn: unsupported K-type"); - GGML_ASSERT(v_to_float && "fattn: unsupported V-type"); + GGML_ASSERT(( q_to_vec_dot) && "fattn: unsupported K-type"); + GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float ) && "fattn: unsupported V-type"); // loop over n_batch and n_head for (int ir = ir0; ir < ir1; ++ir) { @@ -6818,10 +6818,14 @@ static void ggml_compute_forward_flash_attn_ext_f16( vs = expf(s - M); } - v_to_float(v_data, V32, DV); - // V += v*expf(s - M) - ggml_vec_mad_f32(DV, VKQ32, V32, vs); + if (v_to_float) { + v_to_float(v_data, V32, DV); + ggml_vec_mad_f32(DV, VKQ32, V32, vs); + } else { + // V is F32 + ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); + } } S = S*ms + vs; // scale and increment sum with partial sum diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 456e1fd994c40..f226826020a5a 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -1345,6 +1345,11 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_ARANGE: return true; case GGML_OP_FLASH_ATTN_EXT: + if (op->src[0]->ne[0] == 32) { + // head size == 32 (e.g. 
bert-bge-small) + // TODO: not sure if it is worth adding kernels for this size + return false; + } if (op->src[1]->type != op->src[2]->type) { return false; } diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index c3469177e091c..cd955d63bc390 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1215,6 +1215,15 @@ ggml_tensor * llm_graph_context::build_attn_mha( v = ggml_transpose(ctx0, v); } + // this can happen when KV cache is not used (e.g. an embedding model with non-causal attn) + if (k->type == GGML_TYPE_F32) { + k = ggml_cast(ctx0, k, GGML_TYPE_F16); + } + + if (v->type == GGML_TYPE_F32) { + v = ggml_cast(ctx0, v, GGML_TYPE_F16); + } + cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias, hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
diff --git a/examples/server/tests/unit/test_embedding.py b/examples/server/tests/unit/test_embedding.py index 8b0eb42b0926f..0feb452ccfcd4 100644 --- a/examples/server/tests/unit/test_embedding.py +++ b/examples/server/tests/unit/test_embedding.py @@ -49,6 +49,26 @@ def test_embedding_multiple(): assert len(d['embedding']) > 1 +def test_embedding_multiple_with_fa(): + server = ServerPreset.bert_bge_small_with_fa() + server.pooling = 'last' + server.start() + # one of these should trigger the FA branch (i.e. context size % 256 == 0) + res = server.make_request("POST", "/v1/embeddings", data={ + "input": [ + "a "*253, + "b "*254, + "d "*256, + ], + }) + assert res.status_code == 200 + assert len(res.body['data']) == 4 + assert 'embedding' in d + assert len(d['embedding']) > 1 @pytest.mark.parametrize( "input,is_multi_prompt", [ diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 30aa8660950a1..4dc2062a8e5b9 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -323,6 +323,21 @@ def bert_bge_small() -> ServerProcess: server.server_embeddings = True return server + @staticmethod + def bert_bge_small_with_fa() -> ServerProcess: + server = ServerProcess() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf" + server.model_alias = "bert-bge-small" + server.n_ctx = 1024 + server.n_batch = 300 + server.n_ubatch = 300 + server.n_slots = 2 + server.fa = True + server.seed = 42 + return server @staticmethod def tinyllama_infill() -> ServerProcess: server = ServerProcess() diff --git a/examples/server_embd.py b/examples/server_embd.py index 0e34c6ceab9ca..f8b0ffecd8f47 100644 --- a/examples/server_embd.py +++ b/examples/server_embd.py @@ -15,7 +15,7 @@ async def main(): model_url = "http://127.0.0.1:6900" responses: list[requests.Response] = await asyncio.gather(*[requests_post_async( url= f"{model_url}/embedding", - json= {"content": str(0)*1024} + json= 
{"content": "a "*1022} ) for i in range(n)]) for response in responses: diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 7a8d5ac6fd9d0..f63656be54f5c 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -6721,8 +6721,8 @@ static void ggml_compute_forward_flash_attn_ext_f16( ggml_vec_dot_t const kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; ggml_to_float_t const v_to_float = ggml_get_type_traits(v->type)->to_float; - GGML_ASSERT(q_to_vec_dot && "fattn: unsupported K-type"); - GGML_ASSERT(v_to_float && "fattn: unsupported V-type"); + GGML_ASSERT(( q_to_vec_dot) && "fattn: unsupported K-type"); // loop over n_batch and n_head for (int ir = ir0; ir < ir1; ++ir) { @@ -6818,10 +6818,14 @@ static void ggml_compute_forward_flash_attn_ext_f16( vs = expf(s - M); } - v_to_float(v_data, V32, DV); - // V += v*expf(s - M) - ggml_vec_mad_f32(DV, VKQ32, V32, vs); + if (v_to_float) { + v_to_float(v_data, V32, DV); + ggml_vec_mad_f32(DV, VKQ32, V32, vs); + } else { + // V is F32 + ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); + } S = S*ms + vs; // scale and increment sum with partial sum diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 456e1fd994c40..f226826020a5a 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -1345,6 +1345,11 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_ARANGE: return true; case GGML_OP_FLASH_ATTN_EXT: + if (op->src[0]->ne[0] == 32) { + // head size == 32 (e.g. bert-bge-small) + return false; + } if (op->src[1]->type != op->src[2]->type) { return false; diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp index c3469177e091c..cd955d63bc390 100644 --- a/src/llama-graph.cpp +++ b/src/llama-graph.cpp @@ -1215,6 +1215,15 @@ ggml_tensor * llm_graph_context::build_attn_mha( v = ggml_transpose(ctx0, v); } + // this can happen when KV cache is not used (e.g. 
an embedding model with non-causal attn) + if (k->type == GGML_TYPE_F32) { + k = ggml_cast(ctx0, k, GGML_TYPE_F16); + if (v->type == GGML_TYPE_F32) { + v = ggml_cast(ctx0, v, GGML_TYPE_F16); cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias, hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
[ "+ \"c \"*255,", "+ for d in res.body['data']:", "+ server.server_embeddings = True", "+ GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float ) && \"fattn: unsupported V-type\");", "+ // TODO: not sure if it is worth adding kernels for this size" ]
[ 17, 23, 51, 81, 113 ]
{ "additions": 59, "author": "ggerganov", "deletions": 6, "html_url": "https://github.com/ggml-org/llama.cpp/pull/12825", "issue_id": 12825, "merged_at": "2025-04-08T16:54:52Z", "omission_probability": 0.1, "pr_number": 12825, "repo": "ggml-org/llama.cpp", "title": "llama : fix FA when KV cache is not used (i.e. embeddings)", "total_changes": 65 }
839
diff --git a/lib/internal/readline/interface.js b/lib/internal/readline/interface.js index 872fb8994cd214..429aa41d4bfabf 100644 --- a/lib/internal/readline/interface.js +++ b/lib/internal/readline/interface.js @@ -599,9 +599,6 @@ class Interface extends InterfaceConstructor { * @returns {void | Interface} */ pause() { - if (this.closed) { - throw new ERR_USE_AFTER_CLOSE('readline'); - } if (this.paused) return; this.input.pause(); this.paused = true; @@ -614,9 +611,6 @@ class Interface extends InterfaceConstructor { * @returns {void | Interface} */ resume() { - if (this.closed) { - throw new ERR_USE_AFTER_CLOSE('readline'); - } if (!this.paused) return; this.input.resume(); this.paused = false; @@ -637,9 +631,6 @@ class Interface extends InterfaceConstructor { * @returns {void} */ write(d, key) { - if (this.closed) { - throw new ERR_USE_AFTER_CLOSE('readline'); - } if (this.paused) this.resume(); if (this.terminal) { this[kTtyWrite](d, key); diff --git a/test/parallel/test-readline-interface.js b/test/parallel/test-readline-interface.js index c640654a7c742d..12ba0c709622e9 100644 --- a/test/parallel/test-readline-interface.js +++ b/test/parallel/test-readline-interface.js @@ -1202,47 +1202,6 @@ for (let i = 0; i < 12; i++) { fi.emit('data', 'Node.js\n'); } - // Call write after close - { - const [rli, fi] = getInterface({ terminal }); - rli.question('What\'s your name?', common.mustCall((name) => { - assert.strictEqual(name, 'Node.js'); - rli.close(); - assert.throws(() => { - rli.write('I said Node.js'); - }, { - name: 'Error', - code: 'ERR_USE_AFTER_CLOSE' - }); - })); - fi.emit('data', 'Node.js\n'); - } - - // Call pause/resume after close - { - const [rli, fi] = getInterface({ terminal }); - rli.question('What\'s your name?', common.mustCall((name) => { - assert.strictEqual(name, 'Node.js'); - rli.close(); - // No 'resume' nor 'pause' event should be emitted after close - rli.on('resume', common.mustNotCall()); - rli.on('pause', common.mustNotCall()); - 
assert.throws(() => { - rli.pause(); - }, { - name: 'Error', - code: 'ERR_USE_AFTER_CLOSE' - }); - assert.throws(() => { - rli.resume(); - }, { - name: 'Error', - code: 'ERR_USE_AFTER_CLOSE' - }); - })); - fi.emit('data', 'Node.js\n'); - } - // Can create a new readline Interface with a null output argument { const [rli, fi] = getInterface({ output: null, terminal }); diff --git a/test/parallel/test-readline-promises-interface.js b/test/parallel/test-readline-promises-interface.js index 12d72f49735401..32aab1b60c2ee5 100644 --- a/test/parallel/test-readline-promises-interface.js +++ b/test/parallel/test-readline-promises-interface.js @@ -204,7 +204,7 @@ function assertCursorRowsAndCols(rli, rows, cols) { fi.emit('data', character); } fi.emit('data', '\n'); - fi.end(); + rli.close(); } // \t when there is no completer function should behave like an ordinary diff --git a/test/parallel/test-readline-promises-tab-complete.js b/test/parallel/test-readline-promises-tab-complete.js index 602bdd9e7965bf..d8b0ac30ee779d 100644 --- a/test/parallel/test-readline-promises-tab-complete.js +++ b/test/parallel/test-readline-promises-tab-complete.js @@ -80,7 +80,7 @@ if (process.env.TERM === 'dumb') { output = ''; }); } - fi.end(); + rli.close(); }); }); }); @@ -114,5 +114,5 @@ if (process.env.TERM === 'dumb') { assert.match(output, /^Tab completion error: Error: message/); output = ''; }); - fi.end(); + rli.close(); } diff --git a/test/parallel/test-repl-import-referrer.js b/test/parallel/test-repl-import-referrer.js index 9c3e961e5e1585..1c12567fcd5068 100644 --- a/test/parallel/test-repl-import-referrer.js +++ b/test/parallel/test-repl-import-referrer.js @@ -8,24 +8,20 @@ const args = ['--interactive']; const opts = { cwd: fixtures.path('es-modules') }; const child = cp.spawn(process.execPath, args, opts); -const outputs = []; +let output = ''; child.stdout.setEncoding('utf8'); child.stdout.on('data', (data) => { - outputs.push(data); - if (outputs.length === 3) { - // All the 
expected outputs have been received - // so we can close the child process's stdin - child.stdin.end(); - } + output += data; }); child.on('exit', common.mustCall(() => { - const results = outputs[2].split('\n')[0]; - assert.strictEqual( + const results = output.replace(/^> /mg, '').split('\n').slice(2); + assert.deepStrictEqual( results, - '[Module: null prototype] { message: \'A message\' }' + ['[Module: null prototype] { message: \'A message\' }', ''] ); })); child.stdin.write('await import(\'./message.mjs\');\n'); child.stdin.write('.exit'); +child.stdin.end(); diff --git a/test/parallel/test-repl-no-terminal.js b/test/parallel/test-repl-no-terminal.js index f569adcc6322cf..60f97b52e26942 100644 --- a/test/parallel/test-repl-no-terminal.js +++ b/test/parallel/test-repl-no-terminal.js @@ -1,12 +1,7 @@ 'use strict'; const common = require('../common'); -const ArrayStream = require('../common/arraystream'); -const repl = require('repl'); - -const stream = new ArrayStream(); -const replServer = repl.start({ terminal: false, input: stream, output: stream }); - -replServer.setupHistory('/nonexistent/file', common.mustSucceed(() => { - replServer.close(); -})); +const repl = require('repl'); +const r = repl.start({ terminal: false }); +r.setupHistory('/nonexistent/file', common.mustSucceed()); +process.stdin.unref?.(); diff --git a/test/parallel/test-repl-uncaught-exception-async.js b/test/parallel/test-repl-uncaught-exception-async.js index 24710e062e0b75..366a4e6f2968af 100644 --- a/test/parallel/test-repl-uncaught-exception-async.js +++ b/test/parallel/test-repl-uncaught-exception-async.js @@ -34,9 +34,9 @@ r.write( ' throw new RangeError("abc");\n' + '}, 1);console.log()\n' ); +r.close(); setTimeout(() => { - r.close(); const len = process.listenerCount('uncaughtException'); process.removeAllListeners('uncaughtException'); assert.strictEqual(len, 0);
diff --git a/lib/internal/readline/interface.js b/lib/internal/readline/interface.js index 872fb8994cd214..429aa41d4bfabf 100644 --- a/lib/internal/readline/interface.js +++ b/lib/internal/readline/interface.js @@ -599,9 +599,6 @@ class Interface extends InterfaceConstructor { pause() { if (this.paused) return; this.input.pause(); this.paused = true; @@ -614,9 +611,6 @@ class Interface extends InterfaceConstructor { resume() { if (!this.paused) return; this.input.resume(); this.paused = false; @@ -637,9 +631,6 @@ class Interface extends InterfaceConstructor { * @returns {void} write(d, key) { if (this.paused) this.resume(); if (this.terminal) { this[kTtyWrite](d, key); diff --git a/test/parallel/test-readline-interface.js b/test/parallel/test-readline-interface.js index c640654a7c742d..12ba0c709622e9 100644 --- a/test/parallel/test-readline-interface.js +++ b/test/parallel/test-readline-interface.js @@ -1202,47 +1202,6 @@ for (let i = 0; i < 12; i++) { fi.emit('data', 'Node.js\n'); - rli.write('I said Node.js'); - // Call pause/resume after close - // No 'resume' nor 'pause' event should be emitted after close - rli.on('resume', common.mustNotCall()); - rli.on('pause', common.mustNotCall()); - rli.pause(); - rli.resume(); // Can create a new readline Interface with a null output argument { const [rli, fi] = getInterface({ output: null, terminal }); diff --git a/test/parallel/test-readline-promises-interface.js b/test/parallel/test-readline-promises-interface.js index 12d72f49735401..32aab1b60c2ee5 100644 --- a/test/parallel/test-readline-promises-interface.js +++ b/test/parallel/test-readline-promises-interface.js @@ -204,7 +204,7 @@ function assertCursorRowsAndCols(rli, rows, cols) { fi.emit('data', character); fi.emit('data', '\n'); // \t when there is no completer function should behave like an ordinary diff --git a/test/parallel/test-readline-promises-tab-complete.js b/test/parallel/test-readline-promises-tab-complete.js index 602bdd9e7965bf..d8b0ac30ee779d 
100644 --- a/test/parallel/test-readline-promises-tab-complete.js +++ b/test/parallel/test-readline-promises-tab-complete.js @@ -80,7 +80,7 @@ if (process.env.TERM === 'dumb') { output = ''; }); } - fi.end(); + rli.close(); }); @@ -114,5 +114,5 @@ if (process.env.TERM === 'dumb') { assert.match(output, /^Tab completion error: Error: message/); output = ''; diff --git a/test/parallel/test-repl-import-referrer.js b/test/parallel/test-repl-import-referrer.js index 9c3e961e5e1585..1c12567fcd5068 100644 --- a/test/parallel/test-repl-import-referrer.js +++ b/test/parallel/test-repl-import-referrer.js @@ -8,24 +8,20 @@ const args = ['--interactive']; const opts = { cwd: fixtures.path('es-modules') }; const child = cp.spawn(process.execPath, args, opts); -const outputs = []; +let output = ''; child.stdout.setEncoding('utf8'); child.stdout.on('data', (data) => { - outputs.push(data); - if (outputs.length === 3) { - // All the expected outputs have been received - // so we can close the child process's stdin - child.stdin.end(); + output += data; child.on('exit', common.mustCall(() => { - const results = outputs[2].split('\n')[0]; - assert.strictEqual( + const results = output.replace(/^> /mg, '').split('\n').slice(2); + assert.deepStrictEqual( results, - '[Module: null prototype] { message: \'A message\' }' + ['[Module: null prototype] { message: \'A message\' }', ''] ); })); child.stdin.write('await import(\'./message.mjs\');\n'); child.stdin.write('.exit'); +child.stdin.end(); diff --git a/test/parallel/test-repl-no-terminal.js b/test/parallel/test-repl-no-terminal.js index f569adcc6322cf..60f97b52e26942 100644 --- a/test/parallel/test-repl-no-terminal.js +++ b/test/parallel/test-repl-no-terminal.js @@ -1,12 +1,7 @@ 'use strict'; const common = require('../common'); -const ArrayStream = require('../common/arraystream'); -const stream = new ArrayStream(); -const replServer = repl.start({ terminal: false, input: stream, output: stream }); 
-replServer.setupHistory('/nonexistent/file', common.mustSucceed(() => { - replServer.close(); -})); +const repl = require('repl'); +const r = repl.start({ terminal: false }); +r.setupHistory('/nonexistent/file', common.mustSucceed()); diff --git a/test/parallel/test-repl-uncaught-exception-async.js b/test/parallel/test-repl-uncaught-exception-async.js index 24710e062e0b75..366a4e6f2968af 100644 --- a/test/parallel/test-repl-uncaught-exception-async.js +++ b/test/parallel/test-repl-uncaught-exception-async.js @@ -34,9 +34,9 @@ r.write( ' throw new RangeError("abc");\n' + '}, 1);console.log()\n' ); +r.close(); setTimeout(() => { - r.close(); const len = process.listenerCount('uncaughtException'); process.removeAllListeners('uncaughtException'); assert.strictEqual(len, 0);
[ "- // Call write after close", "-const repl = require('repl');", "+process.stdin.unref?.();" ]
[ 42, 162, 174 ]
{ "additions": 14, "author": "dario-piotrowicz", "deletions": 73, "html_url": "https://github.com/nodejs/node/pull/58024", "issue_id": 58024, "merged_at": "2025-04-25T22:45:58Z", "omission_probability": 0.1, "pr_number": 58024, "repo": "nodejs/node", "title": "Revert \"readline: add stricter validation for functions called after close", "total_changes": 87 }
840
diff --git a/src/node_options-inl.h b/src/node_options-inl.h index 24954e0b583834..55078af457fc7c 100644 --- a/src/node_options-inl.h +++ b/src/node_options-inl.h @@ -3,7 +3,9 @@ #if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS +#include <algorithm> #include <cstdlib> +#include <ranges> #include "node_options.h" #include "util.h" @@ -393,15 +395,16 @@ void OptionsParser<Options>::Parse( // Implications for negated options are defined with "--no-". implied_name.insert(2, "no-"); } - auto implications = implications_.equal_range(implied_name); - for (auto imp = implications.first; imp != implications.second; ++imp) { - if (imp->second.type == kV8Option) { - v8_args->push_back(imp->second.name); - } else { - *imp->second.target_field->template Lookup<bool>(options) = - imp->second.target_value; - } - } + auto [f, l] = implications_.equal_range(implied_name); + std::ranges::for_each(std::ranges::subrange(f, l) | std::views::values, + [&](const auto& value) { + if (value.type == kV8Option) { + v8_args->push_back(value.name); + } else { + *value.target_field->template Lookup<bool>( + options) = value.target_value; + } + }); } if (it == options_.end()) {
diff --git a/src/node_options-inl.h b/src/node_options-inl.h index 24954e0b583834..55078af457fc7c 100644 --- a/src/node_options-inl.h +++ b/src/node_options-inl.h @@ -3,7 +3,9 @@ #if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS +#include <algorithm> #include <cstdlib> +#include <ranges> #include "node_options.h" #include "util.h" @@ -393,15 +395,16 @@ void OptionsParser<Options>::Parse( // Implications for negated options are defined with "--no-". implied_name.insert(2, "no-"); } - for (auto imp = implications.first; imp != implications.second; ++imp) { - if (imp->second.type == kV8Option) { - v8_args->push_back(imp->second.name); - } else { - *imp->second.target_field->template Lookup<bool>(options) = - imp->second.target_value; - } - } + auto [f, l] = implications_.equal_range(implied_name); + [&](const auto& value) { + if (value.type == kV8Option) { + v8_args->push_back(value.name); + *value.target_field->template Lookup<bool>( + options) = value.target_value; + } + }); } if (it == options_.end()) {
[ "- auto implications = implications_.equal_range(implied_name);", "+ std::ranges::for_each(std::ranges::subrange(f, l) | std::views::values,", "+ } else {" ]
[ 18, 28, 32 ]
{ "additions": 12, "author": "lemire", "deletions": 9, "html_url": "https://github.com/nodejs/node/pull/57975", "issue_id": 57975, "merged_at": "2025-04-25T17:52:42Z", "omission_probability": 0.1, "pr_number": 57975, "repo": "nodejs/node", "title": "src: use ranges library (C++20) to simplify code", "total_changes": 21 }
841
diff --git a/lib/internal/main/watch_mode.js b/lib/internal/main/watch_mode.js index 60639efb45482d..6ccc90436e6201 100644 --- a/lib/internal/main/watch_mode.js +++ b/lib/internal/main/watch_mode.js @@ -79,10 +79,11 @@ function start() { } child.once('exit', (code) => { exited = true; + const waitingForChanges = 'Waiting for file changes before restarting...'; if (code === 0) { - process.stdout.write(`${blue}Completed running ${kCommandStr}${white}\n`); + process.stdout.write(`${blue}Completed running ${kCommandStr}. ${waitingForChanges}${white}\n`); } else { - process.stdout.write(`${red}Failed running ${kCommandStr}${white}\n`); + process.stdout.write(`${red}Failed running ${kCommandStr}. ${waitingForChanges}${white}\n`); } }); return child; diff --git a/test/sequential/test-watch-mode.mjs b/test/sequential/test-watch-mode.mjs index 324cdd10b3b4ef..bb8b895351493e 100644 --- a/test/sequential/test-watch-mode.mjs +++ b/test/sequential/test-watch-mode.mjs @@ -171,10 +171,10 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -185,10 +185,10 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. 
Waiting for file changes before restarting...`, ]); }); @@ -209,7 +209,7 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.deepStrictEqual(stdout, [ `Restarting ${inspect(jsFile)}`, 'ENV: value2', - `Completed running ${inspect(jsFile)}`, + `Completed running ${inspect(jsFile)}. Waiting for file changes before restarting...`, ]); } finally { await done(); @@ -235,7 +235,7 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 `Restarting ${inspect(jsFile)}`, 'ENV: value1', 'ENV2: newValue', - `Completed running ${inspect(jsFile)}`, + `Completed running ${inspect(jsFile)}. Waiting for file changes before restarting...`, ]); } finally { await done(); @@ -261,7 +261,7 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 `Restarting ${inspect(jsFile)}`, 'ENV: value1', 'ENV2: newValue', - `Completed running ${inspect(jsFile)}`, + `Completed running ${inspect(jsFile)}. Waiting for file changes before restarting...`, ]); } finally { await done(); @@ -279,9 +279,9 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.match(stderr, /Error: fails\r?\n/); assert.deepStrictEqual(stdout, [ - `Failed running ${inspect(file)}`, + `Failed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, - `Failed running ${inspect(file)}`, + `Failed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -298,10 +298,10 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. 
Waiting for file changes before restarting...`, ]); assert.strictEqual(stderr, ''); }); @@ -324,9 +324,9 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.match(stderr, /Error: Cannot find module/g); assert.deepStrictEqual(stdout, [ - `Failed running ${inspect(file)}`, + `Failed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, - `Failed running ${inspect(file)}`, + `Failed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -348,9 +348,9 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.match(stderr, /Error: Cannot find module/g); assert.deepStrictEqual(stdout, [ - `Failed running ${inspect(file)}`, + `Failed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, - `Failed running ${inspect(file)}`, + `Failed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -380,10 +380,10 @@ console.log(dependency); assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ '{}', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, '{}', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -398,10 +398,10 @@ console.log(dependency); assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ '{}', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, '{}', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. 
Waiting for file changes before restarting...`, ]); }); @@ -412,13 +412,13 @@ console.log(dependency); assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -435,10 +435,10 @@ console.log(values.random); assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ random, - `Completed running ${inspect(`${file} --random ${random}`)}`, + `Completed running ${inspect(`${file} --random ${random}`)}. Waiting for file changes before restarting...`, `Restarting ${inspect(`${file} --random ${random}`)}`, random, - `Completed running ${inspect(`${file} --random ${random}`)}`, + `Completed running ${inspect(`${file} --random ${random}`)}. Waiting for file changes before restarting...`, ]); }); @@ -452,10 +452,10 @@ console.log(values.random); assert.notStrictEqual(pid, importPid); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -472,10 +472,10 @@ console.log(values.random); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. 
Waiting for file changes before restarting...`, ]); }); @@ -521,10 +521,10 @@ console.log(values.random); assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -536,10 +536,10 @@ console.log(values.random); assert.strictEqual(stderr, ''); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -567,11 +567,11 @@ console.log(values.random); assert.deepStrictEqual(stdout, [ 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -599,11 +599,11 @@ console.log(values.random); assert.deepStrictEqual(stdout, [ 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -631,11 +631,11 @@ console.log(values.random); assert.deepStrictEqual(stdout, [ 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. 
Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -663,11 +663,11 @@ console.log(values.random); assert.deepStrictEqual(stdout, [ 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -679,10 +679,10 @@ console.log(values.random); assert.match(stderr, /listening on ws:\/\//); assert.deepStrictEqual(stdout, [ 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -704,11 +704,11 @@ console.log(values.random); assert.deepStrictEqual(stdout, [ 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, `Restarting ${inspect(file)}`, 'hello', 'running', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); @@ -788,7 +788,7 @@ process.on('message', (message) => { `Restarting ${inspect(file)}`, 'running', 'Received: second message', - `Completed running ${inspect(file)}`, + `Completed running ${inspect(file)}. Waiting for file changes before restarting...`, ]); }); });
diff --git a/lib/internal/main/watch_mode.js b/lib/internal/main/watch_mode.js index 60639efb45482d..6ccc90436e6201 100644 --- a/lib/internal/main/watch_mode.js +++ b/lib/internal/main/watch_mode.js @@ -79,10 +79,11 @@ function start() { } child.once('exit', (code) => { exited = true; + const waitingForChanges = 'Waiting for file changes before restarting...'; if (code === 0) { - process.stdout.write(`${blue}Completed running ${kCommandStr}${white}\n`); + process.stdout.write(`${blue}Completed running ${kCommandStr}. ${waitingForChanges}${white}\n`); } else { - process.stdout.write(`${red}Failed running ${kCommandStr}${white}\n`); + process.stdout.write(`${red}Failed running ${kCommandStr}. ${waitingForChanges}${white}\n`); } return child; diff --git a/test/sequential/test-watch-mode.mjs b/test/sequential/test-watch-mode.mjs index 324cdd10b3b4ef..bb8b895351493e 100644 --- a/test/sequential/test-watch-mode.mjs +++ b/test/sequential/test-watch-mode.mjs @@ -171,10 +171,10 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 @@ -185,10 +185,10 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 @@ -209,7 +209,7 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.deepStrictEqual(stdout, [ 'ENV: value2', @@ -235,7 +235,7 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 @@ -261,7 +261,7 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 @@ -279,9 +279,9 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 assert.match(stderr, /Error: fails\r?\n/); @@ -298,10 +298,10 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 @@ -324,9 +324,9 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 @@ -348,9 +348,9 @@ describe('watch mode', { concurrency: !process.env.TEST_PARALLEL, timeout: 60_00 @@ -380,10 +380,10 @@ 
console.log(dependency); @@ -398,10 +398,10 @@ console.log(dependency); @@ -412,13 +412,13 @@ console.log(dependency); @@ -435,10 +435,10 @@ console.log(values.random); `Restarting ${inspect(`${file} --random ${random}`)}`, @@ -452,10 +452,10 @@ console.log(values.random); assert.notStrictEqual(pid, importPid); @@ -472,10 +472,10 @@ console.log(values.random); @@ -521,10 +521,10 @@ console.log(values.random); @@ -536,10 +536,10 @@ console.log(values.random); @@ -567,11 +567,11 @@ console.log(values.random); @@ -599,11 +599,11 @@ console.log(values.random); @@ -631,11 +631,11 @@ console.log(values.random); @@ -663,11 +663,11 @@ console.log(values.random); @@ -679,10 +679,10 @@ console.log(values.random); assert.match(stderr, /listening on ws:\/\//); @@ -704,11 +704,11 @@ console.log(values.random); @@ -788,7 +788,7 @@ process.on('message', (message) => { 'Received: second message', });
[]
[]
{ "additions": 48, "author": "dario-piotrowicz", "deletions": 47, "html_url": "https://github.com/nodejs/node/pull/57926", "issue_id": 57926, "merged_at": "2025-04-24T14:00:25Z", "omission_probability": 0.1, "pr_number": 57926, "repo": "nodejs/node", "title": "watch: clarify completion/failure watch mode messages", "total_changes": 95 }
842
diff --git a/lib/internal/crypto/webidl.js b/lib/internal/crypto/webidl.js index f7c3aed3d95ac0..cba02279977e4b 100644 --- a/lib/internal/crypto/webidl.js +++ b/lib/internal/crypto/webidl.js @@ -25,9 +25,6 @@ const { String, TypedArrayPrototypeGetBuffer, TypedArrayPrototypeGetSymbolToStringTag, - globalThis: { - SharedArrayBuffer, - }, } = primordials; const { @@ -47,7 +44,7 @@ const { validateMaxBufferLength, kNamedCurveAliases, } = require('internal/crypto/util'); -const { isArrayBuffer } = require('internal/util/types'); +const { isArrayBuffer, isSharedArrayBuffer } = require('internal/util/types'); // https://tc39.es/ecma262/#sec-tonumber function toNumber(value, opts = kEmptyObject) { @@ -195,13 +192,6 @@ converters.object = (V, opts) => { const isNonSharedArrayBuffer = isArrayBuffer; -function isSharedArrayBuffer(V) { - // SharedArrayBuffers can be disabled with --no-harmony-sharedarraybuffer. - if (SharedArrayBuffer !== undefined) - return ObjectPrototypeIsPrototypeOf(SharedArrayBuffer.prototype, V); - return false; -} - converters.Uint8Array = (V, opts = kEmptyObject) => { if (!ArrayBufferIsView(V) || TypedArrayPrototypeGetSymbolToStringTag(V) !== 'Uint8Array') { diff --git a/test/parallel/test-crypto-subtle-cross-realm.js b/test/parallel/test-crypto-subtle-cross-realm.js index 6261670d5aa1c7..05667c850ad3e8 100644 --- a/test/parallel/test-crypto-subtle-cross-realm.js +++ b/test/parallel/test-crypto-subtle-cross-realm.js @@ -1,5 +1,4 @@ 'use strict'; -// Flags: --expose-internals const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); @@ -7,7 +6,6 @@ if (!common.hasCrypto) const assert = require('assert'); const { subtle } = globalThis.crypto; const vm = require('vm'); -const { isArrayBuffer } = require('internal/util/types'); // Test with same-realm ArrayBuffer { @@ -15,7 +13,6 @@ const { isArrayBuffer } = require('internal/util/types'); subtle.digest('SHA-256', samerealmData) .then(common.mustCall((result) => { - 
assert(isArrayBuffer(result)); assert.strictEqual(result.byteLength, 32); // SHA-256 is 32 bytes })); } @@ -35,11 +32,30 @@ const { isArrayBuffer } = require('internal/util/types'); // This should still work, since we're checking structural type subtle.digest('SHA-256', crossrealmBuffer) .then(common.mustCall((result) => { - assert(isArrayBuffer(result)); assert.strictEqual(result.byteLength, 32); // SHA-256 is 32 bytes })); } +// Cross-realm SharedArrayBuffer should be handled like any SharedArrayBuffer +{ + const context = vm.createContext({}); + const crossrealmSAB = vm.runInContext('new SharedArrayBuffer(4)', context); + assert.notStrictEqual( + Object.getPrototypeOf(crossrealmSAB), + SharedArrayBuffer.prototype + ); + Promise.allSettled([ + subtle.digest('SHA-256', new Uint8Array(new SharedArrayBuffer(4))), + subtle.digest('SHA-256', new Uint8Array(crossrealmSAB)), + ]).then(common.mustCall((r) => { + assert.partialDeepStrictEqual(r, [ + { status: 'rejected' }, + { status: 'rejected' }, + ]); + assert.strictEqual(r[1].reason.message, r[0].reason.message); + })); +} + // Test with both TypedArray buffer methods { const context = vm.createContext({}); @@ -48,14 +64,12 @@ const { isArrayBuffer } = require('internal/util/types'); // Test the .buffer property subtle.digest('SHA-256', crossrealmUint8Array.buffer) .then(common.mustCall((result) => { - assert(isArrayBuffer(result)); assert.strictEqual(result.byteLength, 32); })); // Test passing the TypedArray directly (should work both before and after the fix) subtle.digest('SHA-256', crossrealmUint8Array) .then(common.mustCall((result) => { - assert(isArrayBuffer(result)); assert.strictEqual(result.byteLength, 32); })); } @@ -76,34 +90,32 @@ const { isArrayBuffer } = require('internal/util/types'); name: 'AES-GCM', length: 256 }, true, ['encrypt', 'decrypt']) - .then(common.mustCall((key) => { + .then(async (key) => { // Create an initialization vector const iv = crypto.getRandomValues(new Uint8Array(12)); // 
Encrypt using the cross-realm ArrayBuffer - return subtle.encrypt( + const ciphertext = await subtle.encrypt( { name: 'AES-GCM', iv }, key, crossRealmBuffer - ).then((ciphertext) => { + ); // Decrypt - return subtle.decrypt( - { name: 'AES-GCM', iv }, - key, - ciphertext - ); - }).then(common.mustCall((plaintext) => { + const plaintext = await subtle.decrypt( + { name: 'AES-GCM', iv }, + key, + ciphertext + ); // Verify the decrypted content matches original - const decryptedView = new Uint8Array(plaintext); - for (let i = 0; i < dataView.length; i++) { - assert.strictEqual( - decryptedView[i], - dataView[i], - `Byte at position ${i} doesn't match` - ); - } - })); - })); + const decryptedView = new Uint8Array(plaintext); + for (let i = 0; i < dataView.length; i++) { + assert.strictEqual( + decryptedView[i], + dataView[i], + `Byte at position ${i} doesn't match` + ); + } + }).then(common.mustCall()); } // Test with AES-GCM using TypedArray view of cross-realm ArrayBuffer @@ -122,32 +134,31 @@ const { isArrayBuffer } = require('internal/util/types'); name: 'AES-GCM', length: 256 }, true, ['encrypt', 'decrypt']) - .then(common.mustCall((key) => { + .then(async (key) => { // Create an initialization vector const iv = crypto.getRandomValues(new Uint8Array(12)); // Encrypt using the TypedArray view of cross-realm ArrayBuffer - return subtle.encrypt( + const ciphertext = await subtle.encrypt( { name: 'AES-GCM', iv }, key, dataView - ).then((ciphertext) => { + ); // Decrypt - return subtle.decrypt( - { name: 'AES-GCM', iv }, - key, - ciphertext + const plaintext = await subtle.decrypt( + { name: 'AES-GCM', iv }, + key, + ciphertext + ); + + // Verify the decrypted content matches original + const decryptedView = new Uint8Array(plaintext); + for (let i = 0; i < dataView.length; i++) { + assert.strictEqual( + decryptedView[i], + dataView[i], + `Byte at position ${i} doesn't match` ); - }).then(common.mustCall((plaintext) => { - // Verify the decrypted content matches 
original - const decryptedView = new Uint8Array(plaintext); - for (let i = 0; i < dataView.length; i++) { - assert.strictEqual( - decryptedView[i], - dataView[i], - `Byte at position ${i} doesn't match` - ); - } - })); - })); + } + }).then(common.mustCall()); }
diff --git a/lib/internal/crypto/webidl.js b/lib/internal/crypto/webidl.js index f7c3aed3d95ac0..cba02279977e4b 100644 --- a/lib/internal/crypto/webidl.js +++ b/lib/internal/crypto/webidl.js @@ -25,9 +25,6 @@ const { String, TypedArrayPrototypeGetBuffer, TypedArrayPrototypeGetSymbolToStringTag, - globalThis: { - SharedArrayBuffer, - }, } = primordials; const { @@ -47,7 +44,7 @@ const { validateMaxBufferLength, kNamedCurveAliases, } = require('internal/crypto/util'); +const { isArrayBuffer, isSharedArrayBuffer } = require('internal/util/types'); // https://tc39.es/ecma262/#sec-tonumber function toNumber(value, opts = kEmptyObject) { @@ -195,13 +192,6 @@ converters.object = (V, opts) => { const isNonSharedArrayBuffer = isArrayBuffer; -function isSharedArrayBuffer(V) { - // SharedArrayBuffers can be disabled with --no-harmony-sharedarraybuffer. - if (SharedArrayBuffer !== undefined) - return ObjectPrototypeIsPrototypeOf(SharedArrayBuffer.prototype, V); - return false; -} - converters.Uint8Array = (V, opts = kEmptyObject) => { if (!ArrayBufferIsView(V) || TypedArrayPrototypeGetSymbolToStringTag(V) !== 'Uint8Array') { diff --git a/test/parallel/test-crypto-subtle-cross-realm.js b/test/parallel/test-crypto-subtle-cross-realm.js index 6261670d5aa1c7..05667c850ad3e8 100644 --- a/test/parallel/test-crypto-subtle-cross-realm.js +++ b/test/parallel/test-crypto-subtle-cross-realm.js @@ -1,5 +1,4 @@ 'use strict'; -// Flags: --expose-internals const common = require('../common'); if (!common.hasCrypto) common.skip('missing crypto'); @@ -7,7 +6,6 @@ if (!common.hasCrypto) const assert = require('assert'); const { subtle } = globalThis.crypto; const vm = require('vm'); // Test with same-realm ArrayBuffer @@ -15,7 +13,6 @@ const { isArrayBuffer } = require('internal/util/types'); subtle.digest('SHA-256', samerealmData) @@ -35,11 +32,30 @@ const { isArrayBuffer } = require('internal/util/types'); // This should still work, since we're checking structural type 
subtle.digest('SHA-256', crossrealmBuffer) +// Cross-realm SharedArrayBuffer should be handled like any SharedArrayBuffer +{ + const context = vm.createContext({}); + assert.notStrictEqual( + Object.getPrototypeOf(crossrealmSAB), + SharedArrayBuffer.prototype + ); + Promise.allSettled([ + subtle.digest('SHA-256', new Uint8Array(new SharedArrayBuffer(4))), + subtle.digest('SHA-256', new Uint8Array(crossrealmSAB)), + assert.partialDeepStrictEqual(r, [ + assert.strictEqual(r[1].reason.message, r[0].reason.message); + })); // Test with both TypedArray buffer methods const context = vm.createContext({}); @@ -48,14 +64,12 @@ const { isArrayBuffer } = require('internal/util/types'); // Test the .buffer property subtle.digest('SHA-256', crossrealmUint8Array.buffer) // Test passing the TypedArray directly (should work both before and after the fix) subtle.digest('SHA-256', crossrealmUint8Array) @@ -76,34 +90,32 @@ const { isArrayBuffer } = require('internal/util/types'); // Encrypt using the cross-realm ArrayBuffer crossRealmBuffer - ); // Verify the decrypted content matches original + ); // Test with AES-GCM using TypedArray view of cross-realm ArrayBuffer @@ -122,32 +134,31 @@ const { isArrayBuffer } = require('internal/util/types'); // Encrypt using the TypedArray view of cross-realm ArrayBuffer dataView + // Verify the decrypted content matches original );
[ "+ const crossrealmSAB = vm.runInContext('new SharedArrayBuffer(4)', context);", "+ ]).then(common.mustCall((r) => {", "+ ]);", "+}", "- // Verify the decrypted content matches original" ]
[ 75, 83, 87, 90, 199 ]
{ "additions": 56, "author": "aduh95", "deletions": 55, "html_url": "https://github.com/nodejs/node/pull/57974", "issue_id": 57974, "merged_at": "2025-04-24T13:28:52Z", "omission_probability": 0.1, "pr_number": 57974, "repo": "nodejs/node", "title": "crypto: fix cross-realm `SharedArrayBuffer` validation", "total_changes": 111 }
843
diff --git a/doc/api/async_context.md b/doc/api/async_context.md index c35f65d18d7924..66d134871bd148 100644 --- a/doc/api/async_context.md +++ b/doc/api/async_context.md @@ -172,7 +172,7 @@ added: - v19.8.0 - v18.16.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 diff --git a/doc/api/dns.md b/doc/api/dns.md index 5faf78734867ea..04022473c9ca42 100644 --- a/doc/api/dns.md +++ b/doc/api/dns.md @@ -808,7 +808,7 @@ be an array of objects with the following properties: ## `dns.resolveTlsa(hostname, callback)` <!-- YAML -added: +added: - v23.9.0 - v22.15.0 --> @@ -1504,7 +1504,7 @@ the following properties: ### `dnsPromises.resolveTlsa(hostname)` <!-- YAML -added: +added: - v23.9.0 - v22.15.0 --> diff --git a/doc/api/fs.md b/doc/api/fs.md index c02ecdc0c102e4..c033b303538be6 100644 --- a/doc/api/fs.md +++ b/doc/api/fs.md @@ -484,7 +484,7 @@ changes: - version: REPLACEME pr-url: https://github.com/nodejs/node/pull/57513 description: Marking the API stable. - - version: + - version: - v23.8.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/55461 diff --git a/doc/api/globals.md b/doc/api/globals.md index a9abab902ec5e4..3ba4cf2bc655f2 100644 --- a/doc/api/globals.md +++ b/doc/api/globals.md @@ -257,7 +257,7 @@ Used to handle binary data. See the [buffer section][]. <!-- YAML added: v18.0.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 @@ -360,7 +360,7 @@ with the [`--no-experimental-websocket`][] CLI flag. <!-- YAML added: v18.0.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 @@ -388,7 +388,7 @@ Used to print to stdout and stderr. See the [`console`][] section. <!-- YAML added: v18.0.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 @@ -489,7 +489,7 @@ A browser-compatible implementation of the [`CustomEvent` Web API][]. 
<!-- YAML added: v18.0.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 @@ -1018,10 +1018,10 @@ A browser-compatible implementation of [`ReadableStream`][]. added: v18.0.0 changes: - version: - - v23.11.0 - - v22.15.0 - pr-url: https://github.com/nodejs/node/pull/57510 - description: Marking the API stable. + - v23.11.0 + - v22.15.0 + pr-url: https://github.com/nodejs/node/pull/57510 + description: Marking the API stable. --> A browser-compatible implementation of [`ReadableStreamBYOBReader`][]. @@ -1031,7 +1031,7 @@ A browser-compatible implementation of [`ReadableStreamBYOBReader`][]. <!-- YAML added: v18.0.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 @@ -1059,7 +1059,7 @@ A browser-compatible implementation of [`ReadableStreamDefaultController`][]. <!-- YAML added: v18.0.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 diff --git a/doc/api/module.md b/doc/api/module.md index 76bc79e8dfe8ef..1db592b2abfc61 100644 --- a/doc/api/module.md +++ b/doc/api/module.md @@ -219,7 +219,7 @@ This feature requires `--allow-worker` if used with the [Permission Model][]. ### `module.registerHooks(options)` <!-- YAML -added: +added: - v23.5.0 - v22.15.0 --> @@ -1015,7 +1015,7 @@ register('./path-to-my-hooks.js', { <!-- YAML changes: - - version: + - version: - v23.5.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/55698 diff --git a/doc/api/readline.md b/doc/api/readline.md index c1d74c8e281e27..c921df5df1b443 100644 --- a/doc/api/readline.md +++ b/doc/api/readline.md @@ -275,7 +275,7 @@ from being emitted by the `InterfaceConstructor` instance. 
### `rl[Symbol.dispose]()` <!-- YAML -added: +added: - v23.10.0 - v22.15.0 --> diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 425f4118513b2f..6afe5694a65cf2 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -81,7 +81,7 @@ changes: - version: REPLACEME pr-url: https://github.com/nodejs/node/pull/57752 description: Add `timeout` option. - - version: + - version: - v23.10.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/56991 @@ -301,7 +301,7 @@ wrapper around [`sqlite3_create_function_v2()`][]. ### `database.isOpen` <!-- YAML -added: +added: - v23.11.0 - v22.15.0 --> @@ -413,7 +413,7 @@ targetDb.applyChangeset(changeset); ### `database[Symbol.dispose]()` <!-- YAML -added: +added: - v23.11.0 - v22.15.0 --> @@ -658,7 +658,7 @@ are several caveats to be aware of when enabling bare named parameters: ### `statement.setAllowUnknownNamedParameters(enabled)` <!-- YAML -added: +added: - v23.11.0 - v22.15.0 --> diff --git a/doc/api/tls.md b/doc/api/tls.md index c7bdb1f822df11..7e443fb66e5021 100644 --- a/doc/api/tls.md +++ b/doc/api/tls.md @@ -2263,7 +2263,7 @@ The server can be tested by connecting to it using the example client from ## `tls.getCACertificates([type])` <!-- YAML -added: +added: - v23.10.0 - v22.15.0 --> diff --git a/doc/api/util.md b/doc/api/util.md index 68f8db036b87e5..fe397da2836de2 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -328,7 +328,7 @@ property take precedence over `--trace-deprecation` and ## `util.diff(actual, expected)` <!-- YAML -added: +added: - v23.11.0 - v22.15.0 --> @@ -1564,7 +1564,7 @@ added: - v19.1.0 - v18.13.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 @@ -2698,7 +2698,7 @@ Unicode "replacement character" U+FFFD. 
<!-- YAML added: v18.11.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 @@ -2713,7 +2713,7 @@ as transferable and can be used with `structuredClone()` or `postMessage()`. <!-- YAML added: v18.11.0 changes: - - version: + - version: - v23.11.0 - v22.15.0 pr-url: https://github.com/nodejs/node/pull/57510 diff --git a/doc/api/v8.md b/doc/api/v8.md index c0fae9555b73f8..02ce21c49d2fc8 100644 --- a/doc/api/v8.md +++ b/doc/api/v8.md @@ -1397,7 +1397,7 @@ setTimeout(() => { ## `v8.isStringOneByteRepresentation(content)` <!-- YAML -added: +added: - v23.10.0 - v22.15.0 --> diff --git a/doc/api/zlib.md b/doc/api/zlib.md index e1ed38ad33a7a1..d1efe90ee7de1d 100644 --- a/doc/api/zlib.md +++ b/doc/api/zlib.md @@ -725,7 +725,7 @@ These advanced options are available for controlling decompression: > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1034,7 +1034,7 @@ the inflate and deflate algorithms. > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1067,7 +1067,7 @@ const stream = zlib.createZstdCompress({ > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1079,7 +1079,7 @@ Compress data using the Zstd algorithm. > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1264,7 +1264,7 @@ Creates and returns a new [`Unzip`][] object. > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1278,7 +1278,7 @@ Creates and returns a new [`ZstdCompress`][] object. > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1638,7 +1638,7 @@ Decompress a chunk of data with [`Unzip`][]. > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1652,7 +1652,7 @@ added: > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1665,7 +1665,7 @@ Compress a chunk of data with [`ZstdCompress`][]. 
### `zlib.zstdDecompress(buffer[, options], callback)` <!-- YAML -added: +added: - v23.8.0 - v22.15.0 --> @@ -1679,7 +1679,7 @@ added: > Stability: 1 - Experimental <!-- YAML -added: +added: - v23.8.0 - v22.15.0 -->
diff --git a/doc/api/async_context.md b/doc/api/async_context.md index c35f65d18d7924..66d134871bd148 100644 --- a/doc/api/async_context.md +++ b/doc/api/async_context.md @@ -172,7 +172,7 @@ added: - v19.8.0 - v18.16.0 diff --git a/doc/api/dns.md b/doc/api/dns.md index 5faf78734867ea..04022473c9ca42 100644 --- a/doc/api/dns.md +++ b/doc/api/dns.md @@ -808,7 +808,7 @@ be an array of objects with the following properties: ## `dns.resolveTlsa(hostname, callback)` @@ -1504,7 +1504,7 @@ the following properties: ### `dnsPromises.resolveTlsa(hostname)` diff --git a/doc/api/fs.md b/doc/api/fs.md index c02ecdc0c102e4..c033b303538be6 100644 --- a/doc/api/fs.md +++ b/doc/api/fs.md @@ -484,7 +484,7 @@ changes: pr-url: https://github.com/nodejs/node/pull/57513 description: Marking the API stable. - v23.8.0 pr-url: https://github.com/nodejs/node/pull/55461 diff --git a/doc/api/globals.md b/doc/api/globals.md index a9abab902ec5e4..3ba4cf2bc655f2 100644 --- a/doc/api/globals.md +++ b/doc/api/globals.md @@ -257,7 +257,7 @@ Used to handle binary data. See the [buffer section][]. @@ -360,7 +360,7 @@ with the [`--no-experimental-websocket`][] CLI flag. @@ -388,7 +388,7 @@ Used to print to stdout and stderr. See the [`console`][] section. @@ -489,7 +489,7 @@ A browser-compatible implementation of the [`CustomEvent` Web API][]. @@ -1018,10 +1018,10 @@ A browser-compatible implementation of [`ReadableStream`][]. - version: - - v23.11.0 - - v22.15.0 - pr-url: https://github.com/nodejs/node/pull/57510 - description: Marking the API stable. + - v23.11.0 + - v22.15.0 + pr-url: https://github.com/nodejs/node/pull/57510 + description: Marking the API stable. A browser-compatible implementation of [`ReadableStreamBYOBReader`][]. @@ -1031,7 +1031,7 @@ A browser-compatible implementation of [`ReadableStreamBYOBReader`][]. @@ -1059,7 +1059,7 @@ A browser-compatible implementation of [`ReadableStreamDefaultController`][]. 
diff --git a/doc/api/module.md b/doc/api/module.md index 76bc79e8dfe8ef..1db592b2abfc61 100644 --- a/doc/api/module.md +++ b/doc/api/module.md @@ -219,7 +219,7 @@ This feature requires `--allow-worker` if used with the [Permission Model][]. ### `module.registerHooks(options)` - v23.5.0 @@ -1015,7 +1015,7 @@ register('./path-to-my-hooks.js', { - v23.5.0 pr-url: https://github.com/nodejs/node/pull/55698 diff --git a/doc/api/readline.md b/doc/api/readline.md index c1d74c8e281e27..c921df5df1b443 100644 --- a/doc/api/readline.md +++ b/doc/api/readline.md @@ -275,7 +275,7 @@ from being emitted by the `InterfaceConstructor` instance. ### `rl[Symbol.dispose]()` diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 425f4118513b2f..6afe5694a65cf2 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -81,7 +81,7 @@ changes: pr-url: https://github.com/nodejs/node/pull/57752 description: Add `timeout` option. - v23.10.0 pr-url: https://github.com/nodejs/node/pull/56991 @@ -301,7 +301,7 @@ wrapper around [`sqlite3_create_function_v2()`][]. ### `database.isOpen` @@ -413,7 +413,7 @@ targetDb.applyChangeset(changeset); ### `database[Symbol.dispose]()` @@ -658,7 +658,7 @@ are several caveats to be aware of when enabling bare named parameters: ### `statement.setAllowUnknownNamedParameters(enabled)` diff --git a/doc/api/tls.md b/doc/api/tls.md index c7bdb1f822df11..7e443fb66e5021 100644 --- a/doc/api/tls.md +++ b/doc/api/tls.md @@ -2263,7 +2263,7 @@ The server can be tested by connecting to it using the example client from ## `tls.getCACertificates([type])` diff --git a/doc/api/util.md b/doc/api/util.md index 68f8db036b87e5..fe397da2836de2 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -328,7 +328,7 @@ property take precedence over `--trace-deprecation` and ## `util.diff(actual, expected)` @@ -1564,7 +1564,7 @@ added: - v19.1.0 - v18.13.0 @@ -2698,7 +2698,7 @@ Unicode "replacement character" U+FFFD. 
@@ -2713,7 +2713,7 @@ as transferable and can be used with `structuredClone()` or `postMessage()`. diff --git a/doc/api/v8.md b/doc/api/v8.md index c0fae9555b73f8..02ce21c49d2fc8 100644 --- a/doc/api/v8.md +++ b/doc/api/v8.md @@ -1397,7 +1397,7 @@ setTimeout(() => { ## `v8.isStringOneByteRepresentation(content)` diff --git a/doc/api/zlib.md b/doc/api/zlib.md index e1ed38ad33a7a1..d1efe90ee7de1d 100644 --- a/doc/api/zlib.md +++ b/doc/api/zlib.md @@ -725,7 +725,7 @@ These advanced options are available for controlling decompression: @@ -1034,7 +1034,7 @@ the inflate and deflate algorithms. @@ -1067,7 +1067,7 @@ const stream = zlib.createZstdCompress({ @@ -1079,7 +1079,7 @@ Compress data using the Zstd algorithm. @@ -1264,7 +1264,7 @@ Creates and returns a new [`Unzip`][] object. @@ -1278,7 +1278,7 @@ Creates and returns a new [`ZstdCompress`][] object. @@ -1638,7 +1638,7 @@ Decompress a chunk of data with [`Unzip`][]. @@ -1652,7 +1652,7 @@ added: @@ -1665,7 +1665,7 @@ Compress a chunk of data with [`ZstdCompress`][]. ### `zlib.zstdDecompress(buffer[, options], callback)` @@ -1679,7 +1679,7 @@ added:
[]
[]
{ "additions": 37, "author": "aduh95", "deletions": 37, "html_url": "https://github.com/nodejs/node/pull/57987", "issue_id": 57987, "merged_at": "2025-04-23T08:59:34Z", "omission_probability": 0.1, "pr_number": 57987, "repo": "nodejs/node", "title": "doc: fix linter errors", "total_changes": 74 }
844
diff --git a/src/util-inl.h b/src/util-inl.h index d400d913be70f6..44dbceed5f3d22 100644 --- a/src/util-inl.h +++ b/src/util-inl.h @@ -28,10 +28,13 @@ #include <cstring> #include <locale> #include <ranges> -#include <regex> // NOLINT(build/c++11) #include "node_revert.h" #include "util.h" +#ifdef _WIN32 +#include <regex> // NOLINT(build/c++11) +#endif // _WIN32 + #define CHAR_TEST(bits, name, expr) \ template <typename T> \ bool name(const T ch) { \ @@ -588,9 +591,8 @@ constexpr std::string_view FastStringKey::as_string_view() const { return name_; } -// Inline so the compiler can fully optimize it away on Unix platforms. -bool IsWindowsBatchFile(const char* filename) { #ifdef _WIN32 +inline bool IsWindowsBatchFile(const char* filename) { std::string file_with_extension = filename; // Regex to match the last extension part after the last dot, ignoring // trailing spaces and dots @@ -603,12 +605,8 @@ bool IsWindowsBatchFile(const char* filename) { } return !extension.empty() && (extension == "cmd" || extension == "bat"); -#else - return false; -#endif // _WIN32 } -#ifdef _WIN32 inline std::wstring ConvertToWideString(const std::string& str, UINT code_page) { int size_needed = MultiByteToWideChar( diff --git a/src/util.h b/src/util.h index 876eefd9c0de93..7e575605cb7364 100644 --- a/src/util.h +++ b/src/util.h @@ -1026,9 +1026,12 @@ v8::Maybe<int> GetValidFileMode(Environment* env, v8::Local<v8::Value> input, uv_fs_type type); +#ifdef _WIN32 // Returns true if OS==Windows and filename ends in .bat or .cmd, // case insensitive. inline bool IsWindowsBatchFile(const char* filename); +inline std::wstring ConvertToWideString(const std::string& str, UINT code_page); +#endif // _WIN32 } // namespace node
diff --git a/src/util-inl.h b/src/util-inl.h index d400d913be70f6..44dbceed5f3d22 100644 --- a/src/util-inl.h +++ b/src/util-inl.h @@ -28,10 +28,13 @@ #include <cstring> #include <locale> #include <ranges> -#include <regex> // NOLINT(build/c++11) #include "node_revert.h" #include "util.h" +#include <regex> // NOLINT(build/c++11) +#endif // _WIN32 + #define CHAR_TEST(bits, name, expr) \ template <typename T> \ bool name(const T ch) { \ @@ -588,9 +591,8 @@ constexpr std::string_view FastStringKey::as_string_view() const { return name_; -// Inline so the compiler can fully optimize it away on Unix platforms. -bool IsWindowsBatchFile(const char* filename) { #ifdef _WIN32 +inline bool IsWindowsBatchFile(const char* filename) { std::string file_with_extension = filename; // Regex to match the last extension part after the last dot, ignoring // trailing spaces and dots @@ -603,12 +605,8 @@ bool IsWindowsBatchFile(const char* filename) { } return !extension.empty() && (extension == "cmd" || extension == "bat"); - return false; -#endif // _WIN32 -#ifdef _WIN32 inline std::wstring ConvertToWideString(const std::string& str, UINT code_page) { int size_needed = MultiByteToWideChar( diff --git a/src/util.h b/src/util.h index 876eefd9c0de93..7e575605cb7364 100644 --- a/src/util.h +++ b/src/util.h @@ -1026,9 +1026,12 @@ v8::Maybe<int> GetValidFileMode(Environment* env, v8::Local<v8::Value> input, uv_fs_type type); // Returns true if OS==Windows and filename ends in .bat or .cmd, // case insensitive. inline bool IsWindowsBatchFile(const char* filename); +inline std::wstring ConvertToWideString(const std::string& str, UINT code_page); +#endif // _WIN32 } // namespace node
[ "-#else" ]
[ 34 ]
{ "additions": 8, "author": "anonrig", "deletions": 7, "html_url": "https://github.com/nodejs/node/pull/57951", "issue_id": 57951, "merged_at": "2025-04-22T16:02:17Z", "omission_probability": 0.1, "pr_number": 57951, "repo": "nodejs/node", "title": "src: move windows specific fns to _WIN32", "total_changes": 15 }
845
diff --git a/src/api/environment.cc b/src/api/environment.cc index af5c62f64542a0..50792c6d767478 100644 --- a/src/api/environment.cc +++ b/src/api/environment.cc @@ -807,10 +807,10 @@ Maybe<void> InitializePrimordials(Local<Context> context, // context. CHECK(!exports->Has(context, primordials_string).FromJust()); - Local<Object> primordials = Object::New(isolate); + Local<Object> primordials = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); // Create primordials and make it available to per-context scripts. - if (primordials->SetPrototypeV2(context, Null(isolate)).IsNothing() || - exports->Set(context, primordials_string, primordials).IsNothing()) { + if (exports->Set(context, primordials_string, primordials).IsNothing()) { return Nothing<void>(); } diff --git a/src/node_constants.cc b/src/node_constants.cc index a390bc8616afc1..906e1f6a632d9b 100644 --- a/src/node_constants.cc +++ b/src/node_constants.cc @@ -24,6 +24,7 @@ #include "node_internals.h" #include "util-inl.h" +#include "v8-local-handle.h" #include "zlib.h" #if !defined(_MSC_VER) @@ -1288,41 +1289,24 @@ void CreatePerContextProperties(Local<Object> target, CHECK( target->SetPrototypeV2(env->context(), Null(env->isolate())).FromJust()); - Local<Object> os_constants = Object::New(isolate); - CHECK(os_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> err_constants = Object::New(isolate); - CHECK(err_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> sig_constants = Object::New(isolate); - CHECK(sig_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> priority_constants = Object::New(isolate); - CHECK(priority_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> fs_constants = Object::New(isolate); - CHECK(fs_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> 
crypto_constants = Object::New(isolate); - CHECK(crypto_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> zlib_constants = Object::New(isolate); - CHECK(zlib_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> dlopen_constants = Object::New(isolate); - CHECK(dlopen_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); - - Local<Object> trace_constants = Object::New(isolate); - CHECK(trace_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - .FromJust()); + Local<Object> os_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> err_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> sig_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> priority_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> fs_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> crypto_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> zlib_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> dlopen_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); + Local<Object> trace_constants = + Object::New(isolate, Null(isolate), nullptr, nullptr, 0); DefineErrnoConstants(err_constants); DefineWindowsErrorConstants(err_constants);
diff --git a/src/api/environment.cc b/src/api/environment.cc index af5c62f64542a0..50792c6d767478 100644 --- a/src/api/environment.cc +++ b/src/api/environment.cc @@ -807,10 +807,10 @@ Maybe<void> InitializePrimordials(Local<Context> context, // context. CHECK(!exports->Has(context, primordials_string).FromJust()); - Local<Object> primordials = Object::New(isolate); + Local<Object> primordials = // Create primordials and make it available to per-context scripts. - if (primordials->SetPrototypeV2(context, Null(isolate)).IsNothing() || - exports->Set(context, primordials_string, primordials).IsNothing()) { + if (exports->Set(context, primordials_string, primordials).IsNothing()) { return Nothing<void>(); } diff --git a/src/node_constants.cc b/src/node_constants.cc index a390bc8616afc1..906e1f6a632d9b 100644 --- a/src/node_constants.cc +++ b/src/node_constants.cc @@ -24,6 +24,7 @@ #include "node_internals.h" #include "util-inl.h" +#include "v8-local-handle.h" #include "zlib.h" #if !defined(_MSC_VER) @@ -1288,41 +1289,24 @@ void CreatePerContextProperties(Local<Object> target, CHECK( target->SetPrototypeV2(env->context(), Null(env->isolate())).FromJust()); - Local<Object> os_constants = Object::New(isolate); - CHECK(os_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> err_constants = Object::New(isolate); - CHECK(err_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> sig_constants = Object::New(isolate); - CHECK(sig_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> priority_constants = Object::New(isolate); - CHECK(priority_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> fs_constants = Object::New(isolate); - CHECK(fs_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> crypto_constants = Object::New(isolate); - CHECK(crypto_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> zlib_constants = 
Object::New(isolate); - CHECK(zlib_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> dlopen_constants = Object::New(isolate); - CHECK(dlopen_constants->SetPrototypeV2(env->context(), Null(env->isolate())) - Local<Object> trace_constants = Object::New(isolate); + Local<Object> os_constants = + Local<Object> err_constants = + Local<Object> priority_constants = + Local<Object> fs_constants = + Local<Object> crypto_constants = + Local<Object> zlib_constants = + Local<Object> dlopen_constants = + Local<Object> trace_constants = DefineErrnoConstants(err_constants); DefineWindowsErrorConstants(err_constants);
[ "- CHECK(trace_constants->SetPrototypeV2(env->context(), Null(env->isolate()))", "+ Local<Object> sig_constants =" ]
[ 67, 73 ]
{ "additions": 22, "author": "anonrig", "deletions": 38, "html_url": "https://github.com/nodejs/node/pull/57949", "issue_id": 57949, "merged_at": "2025-04-22T15:27:12Z", "omission_probability": 0.1, "pr_number": 57949, "repo": "nodejs/node", "title": "src: avoid calling SetPrototypeV2()", "total_changes": 60 }
846
diff --git a/lib/internal/crypto/webidl.js b/lib/internal/crypto/webidl.js index f557f81cab0869..f7c3aed3d95ac0 100644 --- a/lib/internal/crypto/webidl.js +++ b/lib/internal/crypto/webidl.js @@ -12,7 +12,6 @@ const { ArrayBufferIsView, - ArrayBufferPrototype, ArrayPrototypeIncludes, ArrayPrototypePush, ArrayPrototypeSort, @@ -48,6 +47,7 @@ const { validateMaxBufferLength, kNamedCurveAliases, } = require('internal/crypto/util'); +const { isArrayBuffer } = require('internal/util/types'); // https://tc39.es/ecma262/#sec-tonumber function toNumber(value, opts = kEmptyObject) { @@ -193,9 +193,7 @@ converters.object = (V, opts) => { return V; }; -function isNonSharedArrayBuffer(V) { - return ObjectPrototypeIsPrototypeOf(ArrayBufferPrototype, V); -} +const isNonSharedArrayBuffer = isArrayBuffer; function isSharedArrayBuffer(V) { // SharedArrayBuffers can be disabled with --no-harmony-sharedarraybuffer. diff --git a/test/parallel/test-crypto-subtle-cross-realm.js b/test/parallel/test-crypto-subtle-cross-realm.js new file mode 100644 index 00000000000000..6261670d5aa1c7 --- /dev/null +++ b/test/parallel/test-crypto-subtle-cross-realm.js @@ -0,0 +1,153 @@ +'use strict'; +// Flags: --expose-internals +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); + +const assert = require('assert'); +const { subtle } = globalThis.crypto; +const vm = require('vm'); +const { isArrayBuffer } = require('internal/util/types'); + +// Test with same-realm ArrayBuffer +{ + const samerealmData = new Uint8Array([1, 2, 3, 4]).buffer; + + subtle.digest('SHA-256', samerealmData) + .then(common.mustCall((result) => { + assert(isArrayBuffer(result)); + assert.strictEqual(result.byteLength, 32); // SHA-256 is 32 bytes + })); +} + +// Test with cross-realm ArrayBuffer +{ + const context = vm.createContext({}); + const crossrealmUint8Array = vm.runInContext('new Uint8Array([1, 2, 3, 4])', context); + const crossrealmBuffer = crossrealmUint8Array.buffer; + + // 
Verify it's truly cross-realm + assert.notStrictEqual( + Object.getPrototypeOf(crossrealmBuffer), + ArrayBuffer.prototype + ); + + // This should still work, since we're checking structural type + subtle.digest('SHA-256', crossrealmBuffer) + .then(common.mustCall((result) => { + assert(isArrayBuffer(result)); + assert.strictEqual(result.byteLength, 32); // SHA-256 is 32 bytes + })); +} + +// Test with both TypedArray buffer methods +{ + const context = vm.createContext({}); + const crossrealmUint8Array = vm.runInContext('new Uint8Array([1, 2, 3, 4])', context); + + // Test the .buffer property + subtle.digest('SHA-256', crossrealmUint8Array.buffer) + .then(common.mustCall((result) => { + assert(isArrayBuffer(result)); + assert.strictEqual(result.byteLength, 32); + })); + + // Test passing the TypedArray directly (should work both before and after the fix) + subtle.digest('SHA-256', crossrealmUint8Array) + .then(common.mustCall((result) => { + assert(isArrayBuffer(result)); + assert.strictEqual(result.byteLength, 32); + })); +} + +// Test with AES-GCM encryption/decryption using cross-realm ArrayBuffer +{ + const context = vm.createContext({}); + const crossRealmBuffer = vm.runInContext('new ArrayBuffer(16)', context); + + // Fill the buffer with some data + const dataView = new Uint8Array(crossRealmBuffer); + for (let i = 0; i < dataView.length; i++) { + dataView[i] = i % 256; + } + + // Generate a key + subtle.generateKey({ + name: 'AES-GCM', + length: 256 + }, true, ['encrypt', 'decrypt']) + .then(common.mustCall((key) => { + // Create an initialization vector + const iv = crypto.getRandomValues(new Uint8Array(12)); + + // Encrypt using the cross-realm ArrayBuffer + return subtle.encrypt( + { name: 'AES-GCM', iv }, + key, + crossRealmBuffer + ).then((ciphertext) => { + // Decrypt + return subtle.decrypt( + { name: 'AES-GCM', iv }, + key, + ciphertext + ); + }).then(common.mustCall((plaintext) => { + // Verify the decrypted content matches original + const 
decryptedView = new Uint8Array(plaintext); + for (let i = 0; i < dataView.length; i++) { + assert.strictEqual( + decryptedView[i], + dataView[i], + `Byte at position ${i} doesn't match` + ); + } + })); + })); +} + +// Test with AES-GCM using TypedArray view of cross-realm ArrayBuffer +{ + const context = vm.createContext({}); + const crossRealmBuffer = vm.runInContext('new ArrayBuffer(16)', context); + + // Fill the buffer with some data + const dataView = new Uint8Array(crossRealmBuffer); + for (let i = 0; i < dataView.length; i++) { + dataView[i] = i % 256; + } + + // Generate a key + subtle.generateKey({ + name: 'AES-GCM', + length: 256 + }, true, ['encrypt', 'decrypt']) + .then(common.mustCall((key) => { + // Create an initialization vector + const iv = crypto.getRandomValues(new Uint8Array(12)); + + // Encrypt using the TypedArray view of cross-realm ArrayBuffer + return subtle.encrypt( + { name: 'AES-GCM', iv }, + key, + dataView + ).then((ciphertext) => { + // Decrypt + return subtle.decrypt( + { name: 'AES-GCM', iv }, + key, + ciphertext + ); + }).then(common.mustCall((plaintext) => { + // Verify the decrypted content matches original + const decryptedView = new Uint8Array(plaintext); + for (let i = 0; i < dataView.length; i++) { + assert.strictEqual( + decryptedView[i], + dataView[i], + `Byte at position ${i} doesn't match` + ); + } + })); + })); +}
diff --git a/lib/internal/crypto/webidl.js b/lib/internal/crypto/webidl.js index f557f81cab0869..f7c3aed3d95ac0 100644 --- a/lib/internal/crypto/webidl.js +++ b/lib/internal/crypto/webidl.js @@ -12,7 +12,6 @@ const { ArrayBufferIsView, - ArrayBufferPrototype, ArrayPrototypeIncludes, ArrayPrototypePush, ArrayPrototypeSort, @@ -48,6 +47,7 @@ const { validateMaxBufferLength, kNamedCurveAliases, } = require('internal/crypto/util'); // https://tc39.es/ecma262/#sec-tonumber function toNumber(value, opts = kEmptyObject) { @@ -193,9 +193,7 @@ converters.object = (V, opts) => { return V; }; -function isNonSharedArrayBuffer(V) { - return ObjectPrototypeIsPrototypeOf(ArrayBufferPrototype, V); -} +const isNonSharedArrayBuffer = isArrayBuffer; function isSharedArrayBuffer(V) { // SharedArrayBuffers can be disabled with --no-harmony-sharedarraybuffer. diff --git a/test/parallel/test-crypto-subtle-cross-realm.js b/test/parallel/test-crypto-subtle-cross-realm.js new file mode 100644 index 00000000000000..6261670d5aa1c7 --- /dev/null +++ b/test/parallel/test-crypto-subtle-cross-realm.js @@ -0,0 +1,153 @@ +'use strict'; +// Flags: --expose-internals +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const { subtle } = globalThis.crypto; +const vm = require('vm'); +// Test with same-realm ArrayBuffer + const samerealmData = new Uint8Array([1, 2, 3, 4]).buffer; +// Test with cross-realm ArrayBuffer + const crossrealmBuffer = crossrealmUint8Array.buffer; + // Verify it's truly cross-realm + assert.notStrictEqual( + ArrayBuffer.prototype + ); + // This should still work, since we're checking structural type + subtle.digest('SHA-256', crossrealmBuffer) +// Test with both TypedArray buffer methods + // Test the .buffer property + subtle.digest('SHA-256', crossrealmUint8Array.buffer) + // Test passing the TypedArray directly (should work both before and after the fix) + subtle.digest('SHA-256', 
crossrealmUint8Array) +// Test with AES-GCM encryption/decryption using cross-realm ArrayBuffer + // Encrypt using the cross-realm ArrayBuffer +// Test with AES-GCM using TypedArray view of cross-realm ArrayBuffer + // Encrypt using the TypedArray view of cross-realm ArrayBuffer + dataView
[ "+ subtle.digest('SHA-256', samerealmData)", "+ Object.getPrototypeOf(crossrealmBuffer),", "+ crossRealmBuffer" ]
[ 52, 67, 123 ]
{ "additions": 155, "author": "fforbeck", "deletions": 4, "html_url": "https://github.com/nodejs/node/pull/57828", "issue_id": 57828, "merged_at": "2025-04-22T13:01:22Z", "omission_probability": 0.1, "pr_number": 57828, "repo": "nodejs/node", "title": "crypto: fix cross-realm ArrayBuffer validation", "total_changes": 159 }
847
diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 0f077ed7981484..beddcf90d2552c 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -241,6 +241,19 @@ Enables or disables the `loadExtension` SQL function, and the `loadExtension()` method. When `allowExtension` is `false` when constructing, you cannot enable loading extensions for security reasons. +### `database.location([dbName])` + +<!-- YAML +added: REPLACEME +--> + +* `dbName` {string} Name of the database. This can be `'main'` (the default primary database) or any other + database that has been added with [`ATTACH DATABASE`][] **Default:** `'main'`. +* Returns: {string | null} The location of the database file. When using an in-memory database, + this method returns null. + +This method is a wrapper around [`sqlite3_db_filename()`][] + ### `database.exec(sql)` <!-- YAML @@ -846,6 +859,7 @@ resolution handler passed to [`database.applyChangeset()`][]. See also [`sqlite3_column_table_name()`]: https://www.sqlite.org/c3ref/column_database_name.html [`sqlite3_create_function_v2()`]: https://www.sqlite.org/c3ref/create_function.html [`sqlite3_create_window_function()`]: https://www.sqlite.org/c3ref/create_function.html +[`sqlite3_db_filename()`]: https://sqlite.org/c3ref/db_filename.html [`sqlite3_exec()`]: https://www.sqlite.org/c3ref/exec.html [`sqlite3_expanded_sql()`]: https://www.sqlite.org/c3ref/expanded_sql.html [`sqlite3_get_autocommit()`]: https://sqlite.org/c3ref/get_autocommit.html diff --git a/src/node_sqlite.cc b/src/node_sqlite.cc index 2809a8b81cb1b7..339b7cf0f4eff7 100644 --- a/src/node_sqlite.cc +++ b/src/node_sqlite.cc @@ -1184,6 +1184,36 @@ void DatabaseSync::CustomFunction(const FunctionCallbackInfo<Value>& args) { CHECK_ERROR_OR_THROW(env->isolate(), db, r, SQLITE_OK, void()); } +void DatabaseSync::Location(const FunctionCallbackInfo<Value>& args) { + DatabaseSync* db; + ASSIGN_OR_RETURN_UNWRAP(&db, args.This()); + Environment* env = Environment::GetCurrent(args); + 
THROW_AND_RETURN_ON_BAD_STATE(env, !db->IsOpen(), "database is not open"); + + std::string db_name = "main"; + if (!args[0]->IsUndefined()) { + if (!args[0]->IsString()) { + THROW_ERR_INVALID_ARG_TYPE(env->isolate(), + "The \"dbName\" argument must be a string."); + return; + } + + db_name = Utf8Value(env->isolate(), args[0].As<String>()).ToString(); + } + + const char* db_filename = + sqlite3_db_filename(db->connection_, db_name.c_str()); + if (!db_filename || db_filename[0] == '\0') { + args.GetReturnValue().Set(Null(env->isolate())); + return; + } + + Local<String> ret; + if (String::NewFromUtf8(env->isolate(), db_filename).ToLocal(&ret)) { + args.GetReturnValue().Set(ret); + } +} + void DatabaseSync::AggregateFunction(const FunctionCallbackInfo<Value>& args) { DatabaseSync* db; ASSIGN_OR_RETURN_UNWRAP(&db, args.This()); @@ -2616,6 +2646,8 @@ static void Initialize(Local<Object> target, SetProtoMethod(isolate, db_tmpl, "prepare", DatabaseSync::Prepare); SetProtoMethod(isolate, db_tmpl, "exec", DatabaseSync::Exec); SetProtoMethod(isolate, db_tmpl, "function", DatabaseSync::CustomFunction); + SetProtoMethodNoSideEffect( + isolate, db_tmpl, "location", DatabaseSync::Location); SetProtoMethod( isolate, db_tmpl, "aggregate", DatabaseSync::AggregateFunction); SetProtoMethod( diff --git a/src/node_sqlite.h b/src/node_sqlite.h index 1a5704b7f7799b..eff62a97a8d873 100644 --- a/src/node_sqlite.h +++ b/src/node_sqlite.h @@ -66,6 +66,7 @@ class DatabaseSync : public BaseObject { static void Close(const v8::FunctionCallbackInfo<v8::Value>& args); static void Prepare(const v8::FunctionCallbackInfo<v8::Value>& args); static void Exec(const v8::FunctionCallbackInfo<v8::Value>& args); + static void Location(const v8::FunctionCallbackInfo<v8::Value>& args); static void CustomFunction(const v8::FunctionCallbackInfo<v8::Value>& args); static void AggregateFunction( const v8::FunctionCallbackInfo<v8::Value>& args); diff --git a/test/parallel/test-sqlite-database-sync.js 
b/test/parallel/test-sqlite-database-sync.js index 9f707bbb21acb1..773958cf2700db 100644 --- a/test/parallel/test-sqlite-database-sync.js +++ b/test/parallel/test-sqlite-database-sync.js @@ -361,3 +361,55 @@ suite('DatabaseSync.prototype.isTransaction', () => { }); }); }); + +suite('DatabaseSync.prototype.location()', () => { + test('throws if database is not open', (t) => { + const db = new DatabaseSync(nextDb(), { open: false }); + + t.assert.throws(() => { + db.location(); + }, { + code: 'ERR_INVALID_STATE', + message: /database is not open/, + }); + }); + + test('throws if provided dbName is not string', (t) => { + const db = new DatabaseSync(nextDb()); + t.after(() => { db.close(); }); + + t.assert.throws(() => { + db.location(null); + }, { + code: 'ERR_INVALID_ARG_TYPE', + message: /The "dbName" argument must be a string/, + }); + }); + + test('returns null when connected to in-memory database', (t) => { + const db = new DatabaseSync(':memory:'); + t.assert.strictEqual(db.location(), null); + }); + + test('returns db path when connected to a persistent database', (t) => { + const dbPath = nextDb(); + const db = new DatabaseSync(dbPath); + t.after(() => { db.close(); }); + t.assert.strictEqual(db.location(), dbPath); + }); + + test('returns that specific db path when attached', (t) => { + const dbPath = nextDb(); + const otherPath = nextDb(); + const db = new DatabaseSync(dbPath); + t.after(() => { db.close(); }); + const other = new DatabaseSync(dbPath); + t.after(() => { other.close(); }); + + // Adding this escape because the test with unusual chars have a single quote which breaks the query + const escapedPath = otherPath.replace("'", "''"); + db.exec(`ATTACH DATABASE '${escapedPath}' AS other`); + + t.assert.strictEqual(db.location('other'), otherPath); + }); +});
diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 0f077ed7981484..beddcf90d2552c 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -241,6 +241,19 @@ Enables or disables the `loadExtension` SQL function, and the `loadExtension()` method. When `allowExtension` is `false` when constructing, you cannot enable loading extensions for security reasons. +### `database.location([dbName])` +<!-- YAML +added: REPLACEME +--> +* `dbName` {string} Name of the database. This can be `'main'` (the default primary database) or any other + database that has been added with [`ATTACH DATABASE`][] **Default:** `'main'`. +* Returns: {string | null} The location of the database file. When using an in-memory database, + this method returns null. +This method is a wrapper around [`sqlite3_db_filename()`][] ### `database.exec(sql)` <!-- YAML @@ -846,6 +859,7 @@ resolution handler passed to [`database.applyChangeset()`][]. See also [`sqlite3_column_table_name()`]: https://www.sqlite.org/c3ref/column_database_name.html [`sqlite3_create_function_v2()`]: https://www.sqlite.org/c3ref/create_function.html [`sqlite3_create_window_function()`]: https://www.sqlite.org/c3ref/create_function.html +[`sqlite3_db_filename()`]: https://sqlite.org/c3ref/db_filename.html [`sqlite3_exec()`]: https://www.sqlite.org/c3ref/exec.html [`sqlite3_expanded_sql()`]: https://www.sqlite.org/c3ref/expanded_sql.html [`sqlite3_get_autocommit()`]: https://sqlite.org/c3ref/get_autocommit.html diff --git a/src/node_sqlite.cc b/src/node_sqlite.cc index 2809a8b81cb1b7..339b7cf0f4eff7 100644 --- a/src/node_sqlite.cc +++ b/src/node_sqlite.cc @@ -1184,6 +1184,36 @@ void DatabaseSync::CustomFunction(const FunctionCallbackInfo<Value>& args) { CHECK_ERROR_OR_THROW(env->isolate(), db, r, SQLITE_OK, void()); } +void DatabaseSync::Location(const FunctionCallbackInfo<Value>& args) { + ASSIGN_OR_RETURN_UNWRAP(&db, args.This()); + Environment* env = Environment::GetCurrent(args); + THROW_AND_RETURN_ON_BAD_STATE(env, 
!db->IsOpen(), "database is not open"); + std::string db_name = "main"; + if (!args[0]->IsUndefined()) { + if (!args[0]->IsString()) { + THROW_ERR_INVALID_ARG_TYPE(env->isolate(), + "The \"dbName\" argument must be a string."); + return; + } + db_name = Utf8Value(env->isolate(), args[0].As<String>()).ToString(); + const char* db_filename = + sqlite3_db_filename(db->connection_, db_name.c_str()); + if (!db_filename || db_filename[0] == '\0') { + return; + Local<String> ret; + if (String::NewFromUtf8(env->isolate(), db_filename).ToLocal(&ret)) { + args.GetReturnValue().Set(ret); +} void DatabaseSync::AggregateFunction(const FunctionCallbackInfo<Value>& args) { DatabaseSync* db; ASSIGN_OR_RETURN_UNWRAP(&db, args.This()); @@ -2616,6 +2646,8 @@ static void Initialize(Local<Object> target, SetProtoMethod(isolate, db_tmpl, "prepare", DatabaseSync::Prepare); SetProtoMethod(isolate, db_tmpl, "exec", DatabaseSync::Exec); SetProtoMethod(isolate, db_tmpl, "function", DatabaseSync::CustomFunction); + SetProtoMethodNoSideEffect( + isolate, db_tmpl, "location", DatabaseSync::Location); isolate, db_tmpl, "aggregate", DatabaseSync::AggregateFunction); diff --git a/src/node_sqlite.h b/src/node_sqlite.h index 1a5704b7f7799b..eff62a97a8d873 100644 --- a/src/node_sqlite.h +++ b/src/node_sqlite.h @@ -66,6 +66,7 @@ class DatabaseSync : public BaseObject { static void Close(const v8::FunctionCallbackInfo<v8::Value>& args); static void Prepare(const v8::FunctionCallbackInfo<v8::Value>& args); static void Exec(const v8::FunctionCallbackInfo<v8::Value>& args); + static void Location(const v8::FunctionCallbackInfo<v8::Value>& args); static void CustomFunction(const v8::FunctionCallbackInfo<v8::Value>& args); static void AggregateFunction( const v8::FunctionCallbackInfo<v8::Value>& args); diff --git a/test/parallel/test-sqlite-database-sync.js b/test/parallel/test-sqlite-database-sync.js index 9f707bbb21acb1..773958cf2700db 100644 --- a/test/parallel/test-sqlite-database-sync.js +++ 
b/test/parallel/test-sqlite-database-sync.js @@ -361,3 +361,55 @@ suite('DatabaseSync.prototype.isTransaction', () => { }); }); }); +suite('DatabaseSync.prototype.location()', () => { + test('throws if database is not open', (t) => { + const db = new DatabaseSync(nextDb(), { open: false }); + db.location(); + code: 'ERR_INVALID_STATE', + message: /database is not open/, + test('throws if provided dbName is not string', (t) => { + const db = new DatabaseSync(nextDb()); + db.location(null); + code: 'ERR_INVALID_ARG_TYPE', + message: /The "dbName" argument must be a string/, + test('returns null when connected to in-memory database', (t) => { + const db = new DatabaseSync(':memory:'); + t.assert.strictEqual(db.location(), null); + test('returns db path when connected to a persistent database', (t) => { + t.assert.strictEqual(db.location(), dbPath); + test('returns that specific db path when attached', (t) => { + const otherPath = nextDb(); + const other = new DatabaseSync(dbPath); + t.after(() => { other.close(); }); + // Adding this escape because the test with unusual chars have a single quote which breaks the query + const escapedPath = otherPath.replace("'", "''"); + db.exec(`ATTACH DATABASE '${escapedPath}' AS other`); + t.assert.strictEqual(db.location('other'), otherPath); +});
[ "+ DatabaseSync* db;", "+ args.GetReturnValue().Set(Null(env->isolate()));" ]
[ 41, 60 ]
{ "additions": 99, "author": "geeksilva97", "deletions": 0, "html_url": "https://github.com/nodejs/node/pull/57860", "issue_id": 57860, "merged_at": "2025-04-21T14:57:15Z", "omission_probability": 0.1, "pr_number": 57860, "repo": "nodejs/node", "title": "sqlite: add location method", "total_changes": 99 }
848
diff --git a/tools/actions/lint-release-proposal-commit-list.mjs b/tools/actions/lint-release-proposal-commit-list.mjs index b9745bad3c30c1..c0cef2dcf02e60 100755 --- a/tools/actions/lint-release-proposal-commit-list.mjs +++ b/tools/actions/lint-release-proposal-commit-list.mjs @@ -19,14 +19,24 @@ const stdinLineByLine = createInterface(process.stdin)[Symbol.asyncIterator](); const changelog = await readFile(CHANGELOG_PATH, 'utf-8'); const commitListingStart = changelog.indexOf('\n### Commits\n'); -const commitListingEnd = changelog.indexOf('\n\n<a', commitListingStart); -const commitList = changelog.slice(commitListingStart, commitListingEnd === -1 ? undefined : commitListingEnd + 1) - // Checking for semverness is too expansive, it is left as a exercice for human reviewers. +let commitList; +if (commitListingStart === -1) { + // We're preparing a semver-major release. + commitList = changelog.replace(/(^.+\n### Semver-Major|\n### Semver-(Minor|Patch)) Commits\n/gs, '') + .replaceAll('**(SEMVER-MAJOR)** ', ''); +} else { + const commitListingEnd = changelog.indexOf('\n\n<a', commitListingStart); + assert.notStrictEqual(commitListingEnd, -1); + commitList = changelog.slice(commitListingStart, commitListingEnd + 1); +} + +// Normalize for consistent comparison +commitList = commitList .replaceAll('**(SEMVER-MINOR)** ', '') - // Correct Markdown escaping is validated by the linter, getting rid of it here helps. .replaceAll('\\', ''); -let expectedNumberOfCommitsLeft = commitList.match(/\n\* \[/g).length; +let expectedNumberOfCommitsLeft = commitList.match(/\n\* \[/g)?.length ?? 0; + for await (const line of stdinLineByLine) { const { smallSha, title, prURL } = JSON.parse(line);
diff --git a/tools/actions/lint-release-proposal-commit-list.mjs b/tools/actions/lint-release-proposal-commit-list.mjs index b9745bad3c30c1..c0cef2dcf02e60 100755 --- a/tools/actions/lint-release-proposal-commit-list.mjs +++ b/tools/actions/lint-release-proposal-commit-list.mjs @@ -19,14 +19,24 @@ const stdinLineByLine = createInterface(process.stdin)[Symbol.asyncIterator](); const changelog = await readFile(CHANGELOG_PATH, 'utf-8'); const commitListingStart = changelog.indexOf('\n### Commits\n'); -const commitListingEnd = changelog.indexOf('\n\n<a', commitListingStart); -const commitList = changelog.slice(commitListingStart, commitListingEnd === -1 ? undefined : commitListingEnd + 1) - // Checking for semverness is too expansive, it is left as a exercice for human reviewers. +let commitList; +if (commitListingStart === -1) { + // We're preparing a semver-major release. + commitList = changelog.replace(/(^.+\n### Semver-Major|\n### Semver-(Minor|Patch)) Commits\n/gs, '') + .replaceAll('**(SEMVER-MAJOR)** ', ''); + const commitListingEnd = changelog.indexOf('\n\n<a', commitListingStart); + assert.notStrictEqual(commitListingEnd, -1); + commitList = changelog.slice(commitListingStart, commitListingEnd + 1); +} +// Normalize for consistent comparison +commitList = commitList .replaceAll('**(SEMVER-MINOR)** ', '') - // Correct Markdown escaping is validated by the linter, getting rid of it here helps. .replaceAll('\\', ''); -let expectedNumberOfCommitsLeft = commitList.match(/\n\* \[/g).length; +let expectedNumberOfCommitsLeft = commitList.match(/\n\* \[/g)?.length ?? 0; for await (const line of stdinLineByLine) { const { smallSha, title, prURL } = JSON.parse(line);
[ "+} else {" ]
[ 16 ]
{ "additions": 15, "author": "aduh95", "deletions": 5, "html_url": "https://github.com/nodejs/node/pull/57892", "issue_id": 57892, "merged_at": "2025-04-21T06:33:57Z", "omission_probability": 0.1, "pr_number": 57892, "repo": "nodejs/node", "title": "tools: add semver-major release support to release-lint", "total_changes": 20 }
849
diff --git a/src/node_dotenv.cc b/src/node_dotenv.cc index dd660058115ddb..d5f14fa92e2694 100644 --- a/src/node_dotenv.cc +++ b/src/node_dotenv.cc @@ -102,8 +102,11 @@ MaybeLocal<Object> Dotenv::ToObject(Environment* env) const { return scope.Escape(result); } -// Removes space characters (spaces, tabs and newlines) from -// the start and end of a given input string +// Removes leading and trailing spaces from a string_view. +// Returns an empty string_view if the input is empty. +// Example: +// trim_spaces(" hello ") -> "hello" +// trim_spaces("") -> "" std::string_view trim_spaces(std::string_view input) { if (input.empty()) return ""; @@ -135,33 +138,42 @@ void Dotenv::ParseContent(const std::string_view input) { while (!content.empty()) { // Skip empty lines and comments if (content.front() == '\n' || content.front() == '#') { + // Check if the first character of the content is a newline or a hash auto newline = content.find('\n'); if (newline != std::string_view::npos) { - content.remove_prefix(newline + 1); - continue; - } - } - - // If there is no equal character, then ignore everything - auto equal = content.find('='); - if (equal == std::string_view::npos) { - auto newline = content.find('\n'); - if (newline != std::string_view::npos) { - // If we used `newline` only, - // the '\n' might remain and cause an empty-line parse + // Remove everything up to and including the newline character content.remove_prefix(newline + 1); } else { + // If no newline is found, clear the content content = {}; } - // No valid data here, skip to next line + + // Skip the remaining code in the loop and continue with the next + // iteration. continue; } - key = content.substr(0, equal); - content.remove_prefix(equal + 1); + // Find the next equals sign or newline in a single pass. + // This optimizes the search by avoiding multiple iterations. 
+ auto equal_or_newline = content.find_first_of("=\n"); + + // If we found nothing or found a newline before equals, the line is invalid + if (equal_or_newline == std::string_view::npos || + content.at(equal_or_newline) == '\n') { + if (equal_or_newline != std::string_view::npos) { + content.remove_prefix(equal_or_newline + 1); + content = trim_spaces(content); + continue; + } + break; + } + + // We found an equals sign, extract the key + key = content.substr(0, equal_or_newline); + content.remove_prefix(equal_or_newline + 1); key = trim_spaces(key); - // If the value is not present (e.g. KEY=) set is to an empty string + // If the value is not present (e.g. KEY=) set it to an empty string if (content.empty() || content.front() == '\n') { store_.insert_or_assign(std::string(key), ""); continue; @@ -169,13 +181,19 @@ void Dotenv::ParseContent(const std::string_view input) { content = trim_spaces(content); - if (key.empty()) { - break; - } + // Skip lines with empty keys after trimming spaces. + // Examples of invalid keys that would be skipped: + // =value + // " "=value + if (key.empty()) continue; - // Remove export prefix from key + // Remove export prefix from key and ensure proper spacing. 
+ // Example: export FOO=bar -> FOO=bar if (key.starts_with("export ")) { key.remove_prefix(7); + // Trim spaces after removing export prefix to handle cases like: + // export FOO=bar + key = trim_spaces(key); } // SAFETY: Content is guaranteed to have at least one character @@ -194,6 +212,7 @@ void Dotenv::ParseContent(const std::string_view input) { value = content.substr(1, closing_quote - 1); std::string multi_line_value = std::string(value); + // Replace \n with actual newlines in double-quoted strings size_t pos = 0; while ((pos = multi_line_value.find("\\n", pos)) != std::string_view::npos) { @@ -206,15 +225,17 @@ void Dotenv::ParseContent(const std::string_view input) { if (newline != std::string_view::npos) { content.remove_prefix(newline + 1); } else { + // In case the last line is a single key/value pair + // Example: KEY=VALUE (without a newline at the EOF content = {}; } continue; } } - // Check if the value is wrapped in quotes, single quotes or backticks - if ((content.front() == '\'' || content.front() == '"' || - content.front() == '`')) { + // Handle quoted values (single quotes, double quotes, backticks) + if (content.front() == '\'' || content.front() == '"' || + content.front() == '`') { auto closing_quote = content.find(content.front(), 1); // Check if the closing quote is not found @@ -228,13 +249,16 @@ void Dotenv::ParseContent(const std::string_view input) { value = content.substr(0, newline); store_.insert_or_assign(std::string(key), value); content.remove_prefix(newline + 1); + } else { + // No newline - take rest of content + value = content; + store_.insert_or_assign(std::string(key), value); + break; } } else { - // Example: KEY="value" + // Found closing quote - take content between quotes value = content.substr(1, closing_quote - 1); store_.insert_or_assign(std::string(key), value); - // Select the first newline after the closing quotation mark - // since there could be newline characters inside the value. 
auto newline = content.find('\n', closing_quote + 1); if (newline != std::string_view::npos) { // Use +1 to discard the '\n' itself => next line @@ -257,13 +281,13 @@ void Dotenv::ParseContent(const std::string_view input) { // Example: KEY=value # comment // The value pair should be `value` if (hash_character != std::string_view::npos) { - value = content.substr(0, hash_character); + value = value.substr(0, hash_character); } - store_.insert_or_assign(std::string(key), trim_spaces(value)); + value = trim_spaces(value); + store_.insert_or_assign(std::string(key), std::string(value)); content.remove_prefix(newline + 1); } else { - // In case the last line is a single key/value pair - // Example: KEY=VALUE (without a newline at the EOF) + // Last line without newline value = content; auto hash_char = value.find('#'); if (hash_char != std::string_view::npos) { @@ -272,9 +296,9 @@ void Dotenv::ParseContent(const std::string_view input) { store_.insert_or_assign(std::string(key), trim_spaces(value)); content = {}; } - - store_.insert_or_assign(std::string(key), trim_spaces(value)); } + + content = trim_spaces(content); } } diff --git a/test/parallel/test-dotenv-edge-cases.js b/test/parallel/test-dotenv-edge-cases.js index 68866d828d2889..88b2fc6fca7cb7 100644 --- a/test/parallel/test-dotenv-edge-cases.js +++ b/test/parallel/test-dotenv-edge-cases.js @@ -4,6 +4,7 @@ const common = require('../common'); const assert = require('node:assert'); const path = require('node:path'); const { describe, it } = require('node:test'); +const { parseEnv } = require('node:util'); const fixtures = require('../common/fixtures'); const validEnvFilePath = '../fixtures/dotenv/valid.env'; @@ -200,4 +201,61 @@ describe('.env supports edge cases', () => { assert.strictEqual(child.code, 9); assert.match(child.stderr, /bad option: --env-file-ABCD/); }); + + it('should handle invalid multiline syntax', () => { + const result = parseEnv([ + 'foo', + '', + 'bar', + 'baz=whatever', + 
'VALID_AFTER_INVALID=test', + 'multiple_invalid', + 'lines_without_equals', + 'ANOTHER_VALID=value', + ].join('\n')); + + assert.deepStrictEqual(result, { + baz: 'whatever', + VALID_AFTER_INVALID: 'test', + ANOTHER_VALID: 'value', + }); + }); + + it('should handle trimming of keys and values correctly', () => { + const result = parseEnv([ + ' KEY_WITH_SPACES_BEFORE= value_with_spaces_before_and_after ', + 'KEY_WITH_TABS_BEFORE\t=\tvalue_with_tabs_before_and_after\t', + 'KEY_WITH_SPACES_AND_TABS\t = \t value_with_spaces_and_tabs \t', + ' KEY_WITH_SPACES_ONLY =value', + 'KEY_WITH_NO_VALUE=', + 'KEY_WITH_SPACES_AFTER= value ', + 'KEY_WITH_SPACES_AND_COMMENT=value # this is a comment', + 'KEY_WITH_ONLY_COMMENT=# this is a comment', + 'KEY_WITH_EXPORT=export value', + ' export KEY_WITH_EXPORT_AND_SPACES = value ', + ].join('\n')); + + assert.deepStrictEqual(result, { + KEY_WITH_SPACES_BEFORE: 'value_with_spaces_before_and_after', + KEY_WITH_TABS_BEFORE: 'value_with_tabs_before_and_after', + KEY_WITH_SPACES_AND_TABS: 'value_with_spaces_and_tabs', + KEY_WITH_SPACES_ONLY: 'value', + KEY_WITH_NO_VALUE: '', + KEY_WITH_ONLY_COMMENT: '', + KEY_WITH_SPACES_AFTER: 'value', + KEY_WITH_SPACES_AND_COMMENT: 'value', + KEY_WITH_EXPORT: 'export value', + KEY_WITH_EXPORT_AND_SPACES: 'value', + }); + }); + + it('should handle a comment in a valid value', () => { + const result = parseEnv([ + 'KEY_WITH_COMMENT_IN_VALUE="value # this is a comment"', + ].join('\n')); + + assert.deepStrictEqual(result, { + KEY_WITH_COMMENT_IN_VALUE: 'value # this is a comment', + }); + }); });
diff --git a/src/node_dotenv.cc b/src/node_dotenv.cc index dd660058115ddb..d5f14fa92e2694 100644 --- a/src/node_dotenv.cc +++ b/src/node_dotenv.cc @@ -102,8 +102,11 @@ MaybeLocal<Object> Dotenv::ToObject(Environment* env) const { return scope.Escape(result); -// Removes space characters (spaces, tabs and newlines) from +// Removes leading and trailing spaces from a string_view. +// Returns an empty string_view if the input is empty. +// Example: +// trim_spaces(" hello ") -> "hello" +// trim_spaces("") -> "" std::string_view trim_spaces(std::string_view input) { if (input.empty()) return ""; @@ -135,33 +138,42 @@ void Dotenv::ParseContent(const std::string_view input) { while (!content.empty()) { // Skip empty lines and comments if (content.front() == '\n' || content.front() == '#') { + // Check if the first character of the content is a newline or a hash auto newline = content.find('\n'); if (newline != std::string_view::npos) { - content.remove_prefix(newline + 1); - continue; - } - // If there is no equal character, then ignore everything - auto equal = content.find('='); - if (equal == std::string_view::npos) { - if (newline != std::string_view::npos) { - // If we used `newline` only, - // the '\n' might remain and cause an empty-line parse + // Remove everything up to and including the newline character - // No valid data here, skip to next line + // Skip the remaining code in the loop and continue with the next + // iteration. - key = content.substr(0, equal); - content.remove_prefix(equal + 1); + // Find the next equals sign or newline in a single pass. + // This optimizes the search by avoiding multiple iterations. 
+ auto equal_or_newline = content.find_first_of("=\n"); + // If we found nothing or found a newline before equals, the line is invalid + if (equal_or_newline == std::string_view::npos || + content.at(equal_or_newline) == '\n') { + if (equal_or_newline != std::string_view::npos) { + content.remove_prefix(equal_or_newline + 1); + content = trim_spaces(content); + continue; + } + break; + } + // We found an equals sign, extract the key + key = content.substr(0, equal_or_newline); + content.remove_prefix(equal_or_newline + 1); key = trim_spaces(key); - // If the value is not present (e.g. KEY=) set is to an empty string + // If the value is not present (e.g. KEY=) set it to an empty string if (content.empty() || content.front() == '\n') { store_.insert_or_assign(std::string(key), ""); @@ -169,13 +181,19 @@ void Dotenv::ParseContent(const std::string_view input) { content = trim_spaces(content); - if (key.empty()) { + // Skip lines with empty keys after trimming spaces. + // Examples of invalid keys that would be skipped: + // =value + // " "=value + if (key.empty()) continue; - // Remove export prefix from key + // Example: export FOO=bar -> FOO=bar if (key.starts_with("export ")) { key.remove_prefix(7); + // export FOO=bar + key = trim_spaces(key); // SAFETY: Content is guaranteed to have at least one character @@ -194,6 +212,7 @@ void Dotenv::ParseContent(const std::string_view input) { std::string multi_line_value = std::string(value); + // Replace \n with actual newlines in double-quoted strings size_t pos = 0; while ((pos = multi_line_value.find("\\n", pos)) != std::string_view::npos) { @@ -206,15 +225,17 @@ void Dotenv::ParseContent(const std::string_view input) { } else { + // In case the last line is a single key/value pair + // Example: KEY=VALUE (without a newline at the EOF content = {}; continue; - // Check if the value is wrapped in quotes, single quotes or backticks - if ((content.front() == '\'' || content.front() == '"' || + // Handle quoted values 
(single quotes, double quotes, backticks) + if (content.front() == '\'' || content.front() == '"' || + content.front() == '`') { auto closing_quote = content.find(content.front(), 1); // Check if the closing quote is not found @@ -228,13 +249,16 @@ void Dotenv::ParseContent(const std::string_view input) { value = content.substr(0, newline); store_.insert_or_assign(std::string(key), value); + } else { + // No newline - take rest of content + value = content; + store_.insert_or_assign(std::string(key), value); + break; - // Example: KEY="value" + // Found closing quote - take content between quotes store_.insert_or_assign(std::string(key), value); - // Select the first newline after the closing quotation mark - // since there could be newline characters inside the value. auto newline = content.find('\n', closing_quote + 1); // Use +1 to discard the '\n' itself => next line @@ -257,13 +281,13 @@ void Dotenv::ParseContent(const std::string_view input) { // Example: KEY=value # comment // The value pair should be `value` if (hash_character != std::string_view::npos) { - value = content.substr(0, hash_character); + value = value.substr(0, hash_character); + value = trim_spaces(value); + store_.insert_or_assign(std::string(key), std::string(value)); - // In case the last line is a single key/value pair - // Example: KEY=VALUE (without a newline at the EOF) + // Last line without newline value = content; auto hash_char = value.find('#'); if (hash_char != std::string_view::npos) { @@ -272,9 +296,9 @@ void Dotenv::ParseContent(const std::string_view input) { store_.insert_or_assign(std::string(key), trim_spaces(value)); - store_.insert_or_assign(std::string(key), trim_spaces(value)); + content = trim_spaces(content); } diff --git a/test/parallel/test-dotenv-edge-cases.js b/test/parallel/test-dotenv-edge-cases.js index 68866d828d2889..88b2fc6fca7cb7 100644 --- a/test/parallel/test-dotenv-edge-cases.js +++ b/test/parallel/test-dotenv-edge-cases.js @@ -4,6 +4,7 @@ const common 
= require('../common'); const assert = require('node:assert'); const path = require('node:path'); const { describe, it } = require('node:test'); +const { parseEnv } = require('node:util'); const fixtures = require('../common/fixtures'); const validEnvFilePath = '../fixtures/dotenv/valid.env'; @@ -200,4 +201,61 @@ describe('.env supports edge cases', () => { assert.strictEqual(child.code, 9); assert.match(child.stderr, /bad option: --env-file-ABCD/); }); + it('should handle invalid multiline syntax', () => { + 'foo', + '', + 'bar', + 'baz=whatever', + 'VALID_AFTER_INVALID=test', + 'multiple_invalid', + 'lines_without_equals', + 'ANOTHER_VALID=value', + baz: 'whatever', + VALID_AFTER_INVALID: 'test', + ANOTHER_VALID: 'value', + it('should handle trimming of keys and values correctly', () => { + ' KEY_WITH_SPACES_BEFORE= value_with_spaces_before_and_after ', + 'KEY_WITH_TABS_BEFORE\t=\tvalue_with_tabs_before_and_after\t', + 'KEY_WITH_SPACES_AND_TABS\t = \t value_with_spaces_and_tabs \t', + ' KEY_WITH_SPACES_ONLY =value', + 'KEY_WITH_NO_VALUE=', + 'KEY_WITH_SPACES_AFTER= value ', + 'KEY_WITH_SPACES_AND_COMMENT=value # this is a comment', + 'KEY_WITH_ONLY_COMMENT=# this is a comment', + 'KEY_WITH_EXPORT=export value', + ' export KEY_WITH_EXPORT_AND_SPACES = value ', + KEY_WITH_SPACES_BEFORE: 'value_with_spaces_before_and_after', + KEY_WITH_TABS_BEFORE: 'value_with_tabs_before_and_after', + KEY_WITH_NO_VALUE: '', + KEY_WITH_ONLY_COMMENT: '', + KEY_WITH_SPACES_AFTER: 'value', + KEY_WITH_SPACES_AND_COMMENT: 'value', + KEY_WITH_EXPORT: 'export value', + KEY_WITH_EXPORT_AND_SPACES: 'value', + it('should handle a comment in a valid value', () => { + 'KEY_WITH_COMMENT_IN_VALUE="value # this is a comment"', + KEY_WITH_COMMENT_IN_VALUE: 'value # this is a comment', });
[ "-// the start and end of a given input string", "- auto newline = content.find('\\n');", "+ // If no newline is found, clear the content", "- break;", "+ // Remove export prefix from key and ensure proper spacing.", "+ // Trim spaces after removing export prefix to handle cases like:", "- content.front() == '`')) {", "- store_.insert_or_assign(std::string(key), trim_spaces(value));", "+ KEY_WITH_SPACES_AND_TABS: 'value_with_spaces_and_tabs',", "+ KEY_WITH_SPACES_ONLY: 'value'," ]
[ 9, 33, 40, 82, 91, 95, 123, 157, 233, 234 ]
{ "additions": 116, "author": "AugustinMauroy", "deletions": 34, "html_url": "https://github.com/nodejs/node/pull/57798", "issue_id": 57798, "merged_at": "2025-04-21T06:46:45Z", "omission_probability": 0.1, "pr_number": 57798, "repo": "nodejs/node", "title": "util: fix parseEnv handling of invalid lines", "total_changes": 150 }
850
diff --git a/Makefile b/Makefile index 69c026755bf230..36e79eb99de38d 100644 --- a/Makefile +++ b/Makefile @@ -809,7 +809,7 @@ doc: $(NODE_EXE) doc-only ## Build Node.js, and then build the documentation wit out/doc: mkdir -p $@ - cp doc/node_config_json_schema.json $@ + cp doc/node-config-schema.json $@ # If it's a source tarball, doc/api already contains the generated docs. # Just copy everything under doc/api over. diff --git a/doc/api/cli.md b/doc/api/cli.md index 0e6ca372337651..a102038344b006 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -911,7 +911,7 @@ added: v23.6.0 Enable experimental import support for `.node` addons. -### `--experimental-config-file` +### `--experimental-config-file=config` <!-- YAML added: REPLACEME @@ -919,21 +919,22 @@ added: REPLACEME > Stability: 1.0 - Early development -Use this flag to specify a configuration file that will be loaded and parsed -before the application starts. +If present, Node.js will look for a +configuration file at the specified path. Node.js will read the configuration file and apply the settings. The configuration file should be a JSON file with the following structure: +> \[!NOTE] +> Replace `vX.Y.Z` in the `$schema` with the version of Node.js you are using. + ```json { - "$schema": "https://nodejs.org/dist/REPLACEME/docs/node_config_json_schema.json", + "$schema": "https://nodejs.org/dist/vX.Y.Z/docs/node-config-schema.json", "nodeOptions": { - "experimental-transform-types": true, "import": [ - "amaro/transform" + "amaro/strip" ], - "disable-warning": "ExperimentalWarning", "watch-path": "src", "watch-preserve-output": true } @@ -944,7 +945,7 @@ In the `nodeOptions` field, only flags that are allowed in [`NODE_OPTIONS`][] ar No-op flags are not supported. Not all V8 flags are currently supported. 
-It is possible to use the [official JSON schema](../node_config_json_schema.json) +It is possible to use the [official JSON schema](../node-config-schema.json) to validate the configuration file, which may vary depending on the Node.js version. Each key in the configuration file corresponds to a flag that can be passed as a command-line argument. The value of the key is the value that would be @@ -954,7 +955,7 @@ For example, the configuration file above is equivalent to the following command-line arguments: ```bash -node --experimental-transform-types --import amaro/transform --disable-warning=ExperimentalWarning --watch-path=src --watch-preserve-output +node --import amaro/strip --watch-path=src --watch-preserve-output ``` The priority in configuration is as follows: @@ -976,6 +977,18 @@ unknown keys or keys that cannot used in `NODE_OPTIONS`. Node.js will not sanitize or perform validation on the user-provided configuration, so **NEVER** use untrusted configuration files. +### `--experimental-default-config-file` + +<!-- YAML +added: REPLACEME +--> + +> Stability: 1.0 - Early development + +If the `--experimental-default-config-file` flag is present, Node.js will look for a +`node.config.json` file in the current working directory and load it as a +as configuration file. + ### `--experimental-eventsource` <!-- YAML diff --git a/doc/node_config_json_schema.json b/doc/node-config-schema.json similarity index 100% rename from doc/node_config_json_schema.json rename to doc/node-config-schema.json diff --git a/doc/node.1 b/doc/node.1 index ad8873b423105c..9ed99be3b7771e 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -167,7 +167,10 @@ Interpret the entry point as a URL. Enable experimental addon module support. . .It Fl -experimental-config-file -Enable support for experimental config file +Specifies the configuration file to load. +. +.It Fl -experimental-default-config-file +Enable support for automatically loading node.config.json. . 
.It Fl -experimental-import-meta-resolve Enable experimental ES modules support for import.meta.resolve(). diff --git a/lib/internal/process/pre_execution.js b/lib/internal/process/pre_execution.js index c705fce930da24..1c4a9407552829 100644 --- a/lib/internal/process/pre_execution.js +++ b/lib/internal/process/pre_execution.js @@ -315,7 +315,8 @@ function setupSQLite() { } function initializeConfigFileSupport() { - if (getOptionValue('--experimental-config-file')) { + if (getOptionValue('--experimental-default-config-file') || + getOptionValue('--experimental-config-file')) { emitExperimentalWarning('--experimental-config-file'); } } diff --git a/src/node_config_file.cc b/src/node_config_file.cc index e25ea8ad14fc2c..d801d935a41706 100644 --- a/src/node_config_file.cc +++ b/src/node_config_file.cc @@ -8,22 +8,32 @@ namespace node { std::optional<std::string_view> ConfigReader::GetDataFromArgs( const std::vector<std::string>& args) { - constexpr std::string_view flag = "--experimental-config-file"; + constexpr std::string_view flag_path = "--experimental-config-file"; + constexpr std::string_view default_file = + "--experimental-default-config-file"; + + bool has_default_config_file = false; for (auto it = args.begin(); it != args.end(); ++it) { - if (*it == flag) { + if (*it == flag_path) { // Case: "--experimental-config-file foo" if (auto next = std::next(it); next != args.end()) { return *next; } - } else if (it->starts_with(flag)) { + } else if (it->starts_with(flag_path)) { // Case: "--experimental-config-file=foo" - if (it->size() > flag.size() && (*it)[flag.size()] == '=') { - return it->substr(flag.size() + 1); + if (it->size() > flag_path.size() && (*it)[flag_path.size()] == '=') { + return it->substr(flag_path.size() + 1); } + } else if (*it == default_file || it->starts_with(default_file)) { + has_default_config_file = true; } } + if (has_default_config_file) { + return "node.config.json"; + } + return std::nullopt; } diff --git a/src/node_options.cc 
b/src/node_options.cc index 89af5ff4422996..16cce0df0e2263 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -685,7 +685,10 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() { Implies("--env-file-if-exists", "[has_env_file_string]"); AddOption("--experimental-config-file", "set config file from supplied file", - &EnvironmentOptions::experimental_config_file); + &EnvironmentOptions::experimental_config_file_path); + AddOption("--experimental-default-config-file", + "set config file from default config file", + &EnvironmentOptions::experimental_default_config_file); AddOption("--test", "launch test runner on startup", &EnvironmentOptions::test_runner); diff --git a/src/node_options.h b/src/node_options.h index 93fbebd20e656d..baa615e310e17b 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -258,7 +258,8 @@ class EnvironmentOptions : public Options { bool report_exclude_env = false; bool report_exclude_network = false; - std::string experimental_config_file; + std::string experimental_config_file_path; + bool experimental_default_config_file = false; inline DebugOptions* get_debug_options() { return &debug_options_; } inline const DebugOptions& debug_options() const { return debug_options_; } diff --git a/test/fixtures/rc/default/node.config.json b/test/fixtures/rc/default/node.config.json new file mode 100644 index 00000000000000..54bcbfef04a947 --- /dev/null +++ b/test/fixtures/rc/default/node.config.json @@ -0,0 +1,5 @@ +{ + "nodeOptions": { + "max-http-header-size": 10 + } +} diff --git a/test/fixtures/rc/default/override.json b/test/fixtures/rc/default/override.json new file mode 100644 index 00000000000000..0f6f763cad86c6 --- /dev/null +++ b/test/fixtures/rc/default/override.json @@ -0,0 +1,5 @@ +{ + "nodeOptions": { + "max-http-header-size": 20 + } +} diff --git a/test/fixtures/rc/non-readable/node.config.json b/test/fixtures/rc/non-readable/node.config.json new file mode 100755 index 00000000000000..21e2b85fbda8fc --- /dev/null +++ 
b/test/fixtures/rc/non-readable/node.config.json @@ -0,0 +1,5 @@ +{ + "nodeOptions": { + "max-http-header-size": 10 + } +} diff --git a/test/parallel/test-config-file.js b/test/parallel/test-config-file.js index a53de3be8ed6a5..2154fd37c692c8 100644 --- a/test/parallel/test-config-file.js +++ b/test/parallel/test-config-file.js @@ -4,6 +4,8 @@ const { spawnPromisified } = require('../common'); const fixtures = require('../common/fixtures'); const { match, strictEqual } = require('node:assert'); const { test } = require('node:test'); +const { chmodSync, constants } = require('node:fs'); +const common = require('../common'); test('should handle non existing json', async () => { const result = await spawnPromisified(process.execPath, [ @@ -304,3 +306,47 @@ test('broken value in node_options', async () => { strictEqual(result.stdout, ''); strictEqual(result.code, 9); }); + +test('should use node.config.json as default', async () => { + const result = await spawnPromisified(process.execPath, [ + '--no-warnings', + '--experimental-default-config-file', + '-p', 'http.maxHeaderSize', + ], { + cwd: fixtures.path('rc/default'), + }); + strictEqual(result.stderr, ''); + strictEqual(result.stdout, '10\n'); + strictEqual(result.code, 0); +}); + +test('should override node.config.json when specificied', async () => { + const result = await spawnPromisified(process.execPath, [ + '--no-warnings', + '--experimental-default-config-file', + '--experimental-config-file', + fixtures.path('rc/default/override.json'), + '-p', 'http.maxHeaderSize', + ], { + cwd: fixtures.path('rc/default'), + }); + strictEqual(result.stderr, ''); + strictEqual(result.stdout, '20\n'); + strictEqual(result.code, 0); +}); +// Skip on windows because it doesn't support chmod changing read permissions +test('should throw an error when the file is non readable', { skip: common.isWindows }, async () => { + chmodSync(fixtures.path('rc/non-readable/node.config.json'), constants.O_RDONLY); + const result = await 
spawnPromisified(process.execPath, [ + '--no-warnings', + '--experimental-default-config-file', + '-p', 'http.maxHeaderSize', + ], { + cwd: fixtures.path('rc/non-readable'), + }); + match(result.stderr, /Cannot read configuration from node\.config\.json: permission denied/); + strictEqual(result.stdout, ''); + strictEqual(result.code, 9); + chmodSync(fixtures.path('rc/non-readable/node.config.json'), + constants.S_IRWXU | constants.S_IRWXG | constants.S_IRWXO); +}); diff --git a/test/parallel/test-config-json-schema.js b/test/parallel/test-config-json-schema.js index 0872a2419c0224..5a4c1075d0fdb5 100644 --- a/test/parallel/test-config-json-schema.js +++ b/test/parallel/test-config-json-schema.js @@ -24,7 +24,7 @@ if (!common.hasIntl) { const { generateConfigJsonSchema, } = require('internal/options'); -const schemaInDoc = require('../../doc/node_config_json_schema.json'); +const schemaInDoc = require('../../doc/node-config-schema.json'); const assert = require('assert'); const schema = generateConfigJsonSchema(); @@ -35,6 +35,6 @@ const schema = generateConfigJsonSchema(); // current JSON schema. // To regenerate the JSON schema, run: // out/Release/node --expose-internals tools/doc/generate-json-schema.mjs -// And then run make doc to update the out/doc/node_config_json_schema.json file. +// And then run make doc to update the out/doc/node-config-schema.json file. assert.strictEqual(JSON.stringify(schema), JSON.stringify(schemaInDoc), 'JSON schema is outdated.' 
+ 'Run `out/Release/node --expose-internals tools/doc/generate-json-schema.mjs` to update it.'); diff --git a/tools/doc/generate-json-schema.mjs b/tools/doc/generate-json-schema.mjs index 83a0323fa88ef1..29f15605026c9f 100644 --- a/tools/doc/generate-json-schema.mjs +++ b/tools/doc/generate-json-schema.mjs @@ -4,4 +4,4 @@ import internal from 'internal/options'; import { writeFileSync } from 'fs'; const schema = internal.generateConfigJsonSchema(); -writeFileSync('doc/node_config_json_schema.json', `${JSON.stringify(schema, null, 2)}\n`); +writeFileSync('doc/node-config-schema.json', `${JSON.stringify(schema, null, 2)}\n`);
diff --git a/Makefile b/Makefile index 69c026755bf230..36e79eb99de38d 100644 --- a/Makefile +++ b/Makefile @@ -809,7 +809,7 @@ doc: $(NODE_EXE) doc-only ## Build Node.js, and then build the documentation wit out/doc: mkdir -p $@ - cp doc/node_config_json_schema.json $@ # If it's a source tarball, doc/api already contains the generated docs. # Just copy everything under doc/api over. diff --git a/doc/api/cli.md b/doc/api/cli.md index 0e6ca372337651..a102038344b006 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -911,7 +911,7 @@ added: v23.6.0 Enable experimental import support for `.node` addons. -### `--experimental-config-file` +### `--experimental-config-file=config` added: REPLACEME @@ -919,21 +919,22 @@ added: REPLACEME > Stability: 1.0 - Early development -Use this flag to specify a configuration file that will be loaded and parsed -before the application starts. +If present, Node.js will look for a +configuration file at the specified path. Node.js will read the configuration file and apply the settings. The configuration file should be a JSON file with the following structure: +> \[!NOTE] +> Replace `vX.Y.Z` in the `$schema` with the version of Node.js you are using. ```json { - "$schema": "https://nodejs.org/dist/REPLACEME/docs/node_config_json_schema.json", + "$schema": "https://nodejs.org/dist/vX.Y.Z/docs/node-config-schema.json", "nodeOptions": { - "experimental-transform-types": true, "import": [ - "amaro/transform" + "amaro/strip" ], - "disable-warning": "ExperimentalWarning", "watch-path": "src", "watch-preserve-output": true @@ -944,7 +945,7 @@ In the `nodeOptions` field, only flags that are allowed in [`NODE_OPTIONS`][] ar No-op flags are not supported. Not all V8 flags are currently supported. -It is possible to use the [official JSON schema](../node_config_json_schema.json) to validate the configuration file, which may vary depending on the Node.js version. 
Each key in the configuration file corresponds to a flag that can be passed as a command-line argument. The value of the key is the value that would be @@ -954,7 +955,7 @@ For example, the configuration file above is equivalent to the following command-line arguments: ```bash -node --experimental-transform-types --import amaro/transform --disable-warning=ExperimentalWarning --watch-path=src --watch-preserve-output +node --import amaro/strip --watch-path=src --watch-preserve-output ``` The priority in configuration is as follows: @@ -976,6 +977,18 @@ unknown keys or keys that cannot used in `NODE_OPTIONS`. Node.js will not sanitize or perform validation on the user-provided configuration, so **NEVER** use untrusted configuration files. +<!-- YAML +added: REPLACEME +--> +> Stability: 1.0 - Early development +as configuration file. ### `--experimental-eventsource` diff --git a/doc/node_config_json_schema.json b/doc/node-config-schema.json similarity index 100% rename from doc/node_config_json_schema.json rename to doc/node-config-schema.json diff --git a/doc/node.1 b/doc/node.1 index ad8873b423105c..9ed99be3b7771e 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -167,7 +167,10 @@ Interpret the entry point as a URL. Enable experimental addon module support. .It Fl -experimental-config-file -Enable support for experimental config file +Specifies the configuration file to load. +. +.It Fl -experimental-default-config-file +Enable support for automatically loading node.config.json. .It Fl -experimental-import-meta-resolve Enable experimental ES modules support for import.meta.resolve(). 
diff --git a/lib/internal/process/pre_execution.js b/lib/internal/process/pre_execution.js index c705fce930da24..1c4a9407552829 100644 --- a/lib/internal/process/pre_execution.js +++ b/lib/internal/process/pre_execution.js @@ -315,7 +315,8 @@ function setupSQLite() { function initializeConfigFileSupport() { - if (getOptionValue('--experimental-config-file')) { + if (getOptionValue('--experimental-default-config-file') || + getOptionValue('--experimental-config-file')) { emitExperimentalWarning('--experimental-config-file'); diff --git a/src/node_config_file.cc b/src/node_config_file.cc index e25ea8ad14fc2c..d801d935a41706 100644 --- a/src/node_config_file.cc +++ b/src/node_config_file.cc @@ -8,22 +8,32 @@ namespace node { std::optional<std::string_view> ConfigReader::GetDataFromArgs( const std::vector<std::string>& args) { - constexpr std::string_view flag = "--experimental-config-file"; + constexpr std::string_view flag_path = "--experimental-config-file"; + constexpr std::string_view default_file = + "--experimental-default-config-file"; + bool has_default_config_file = false; for (auto it = args.begin(); it != args.end(); ++it) { - if (*it == flag) { + if (*it == flag_path) { // Case: "--experimental-config-file foo" if (auto next = std::next(it); next != args.end()) { return *next; - } else if (it->starts_with(flag)) { + } else if (it->starts_with(flag_path)) { // Case: "--experimental-config-file=foo" - if (it->size() > flag.size() && (*it)[flag.size()] == '=') { + if (it->size() > flag_path.size() && (*it)[flag_path.size()] == '=') { + return it->substr(flag_path.size() + 1); + } else if (*it == default_file || it->starts_with(default_file)) { + has_default_config_file = true; } + if (has_default_config_file) { + return "node.config.json"; return std::nullopt; diff --git a/src/node_options.cc b/src/node_options.cc index 89af5ff4422996..16cce0df0e2263 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -685,7 +685,10 @@ 
EnvironmentOptionsParser::EnvironmentOptionsParser() { Implies("--env-file-if-exists", "[has_env_file_string]"); AddOption("--experimental-config-file", "set config file from supplied file", - &EnvironmentOptions::experimental_config_file); + "set config file from default config file", + &EnvironmentOptions::experimental_default_config_file); AddOption("--test", "launch test runner on startup", &EnvironmentOptions::test_runner); diff --git a/src/node_options.h b/src/node_options.h index 93fbebd20e656d..baa615e310e17b 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -258,7 +258,8 @@ class EnvironmentOptions : public Options { bool report_exclude_env = false; bool report_exclude_network = false; + std::string experimental_config_file_path; + bool experimental_default_config_file = false; inline DebugOptions* get_debug_options() { return &debug_options_; } inline const DebugOptions& debug_options() const { return debug_options_; } diff --git a/test/fixtures/rc/default/node.config.json b/test/fixtures/rc/default/node.config.json index 00000000000000..54bcbfef04a947 +++ b/test/fixtures/rc/default/node.config.json + "max-http-header-size": 10 diff --git a/test/fixtures/rc/default/override.json b/test/fixtures/rc/default/override.json index 00000000000000..0f6f763cad86c6 +++ b/test/fixtures/rc/default/override.json + "max-http-header-size": 20 diff --git a/test/fixtures/rc/non-readable/node.config.json b/test/fixtures/rc/non-readable/node.config.json new file mode 100755 index 00000000000000..21e2b85fbda8fc +++ b/test/fixtures/rc/non-readable/node.config.json + "max-http-header-size": 10 + } diff --git a/test/parallel/test-config-file.js b/test/parallel/test-config-file.js index a53de3be8ed6a5..2154fd37c692c8 100644 --- a/test/parallel/test-config-file.js +++ b/test/parallel/test-config-file.js @@ -4,6 +4,8 @@ const { spawnPromisified } = require('../common'); const fixtures = require('../common/fixtures'); const { match, strictEqual } = require('node:assert'); 
const { test } = require('node:test'); +const { chmodSync, constants } = require('node:fs'); +const common = require('../common'); test('should handle non existing json', async () => { const result = await spawnPromisified(process.execPath, [ @@ -304,3 +306,47 @@ test('broken value in node_options', async () => { strictEqual(result.stdout, ''); strictEqual(result.code, 9); }); +test('should use node.config.json as default', async () => { + strictEqual(result.stdout, '10\n'); +test('should override node.config.json when specificied', async () => { + '--experimental-config-file', + fixtures.path('rc/default/override.json'), + strictEqual(result.stdout, '20\n'); +// Skip on windows because it doesn't support chmod changing read permissions +test('should throw an error when the file is non readable', { skip: common.isWindows }, async () => { + chmodSync(fixtures.path('rc/non-readable/node.config.json'), constants.O_RDONLY); + cwd: fixtures.path('rc/non-readable'), + match(result.stderr, /Cannot read configuration from node\.config\.json: permission denied/); + strictEqual(result.stdout, ''); + chmodSync(fixtures.path('rc/non-readable/node.config.json'), diff --git a/test/parallel/test-config-json-schema.js b/test/parallel/test-config-json-schema.js index 0872a2419c0224..5a4c1075d0fdb5 100644 --- a/test/parallel/test-config-json-schema.js +++ b/test/parallel/test-config-json-schema.js @@ -24,7 +24,7 @@ if (!common.hasIntl) { const { generateConfigJsonSchema, } = require('internal/options'); -const schemaInDoc = require('../../doc/node_config_json_schema.json'); +const schemaInDoc = require('../../doc/node-config-schema.json'); const assert = require('assert'); const schema = generateConfigJsonSchema(); @@ -35,6 +35,6 @@ const schema = generateConfigJsonSchema(); // current JSON schema. 
// To regenerate the JSON schema, run: // out/Release/node --expose-internals tools/doc/generate-json-schema.mjs -// And then run make doc to update the out/doc/node_config_json_schema.json file. +// And then run make doc to update the out/doc/node-config-schema.json file. assert.strictEqual(JSON.stringify(schema), JSON.stringify(schemaInDoc), 'JSON schema is outdated.' + 'Run `out/Release/node --expose-internals tools/doc/generate-json-schema.mjs` to update it.'); diff --git a/tools/doc/generate-json-schema.mjs b/tools/doc/generate-json-schema.mjs index 83a0323fa88ef1..29f15605026c9f 100644 --- a/tools/doc/generate-json-schema.mjs +++ b/tools/doc/generate-json-schema.mjs @@ -4,4 +4,4 @@ import internal from 'internal/options'; import { writeFileSync } from 'fs'; const schema = internal.generateConfigJsonSchema(); -writeFileSync('doc/node_config_json_schema.json', `${JSON.stringify(schema, null, 2)}\n`); +writeFileSync('doc/node-config-schema.json', `${JSON.stringify(schema, null, 2)}\n`);
[ "+\tcp doc/node-config-schema.json $@", "+It is possible to use the [official JSON schema](../node-config-schema.json)", "+### `--experimental-default-config-file`", "+If the `--experimental-default-config-file` flag is present, Node.js will look for a", "+`node.config.json` file in the current working directory and load it as a", "- return it->substr(flag.size() + 1);", "+ &EnvironmentOptions::experimental_config_file_path);", "+ AddOption(\"--experimental-default-config-file\",", "- std::string experimental_config_file;", "+ \"nodeOptions\": {", "+ strictEqual(result.code, 9);", "+ constants.S_IRWXU | constants.S_IRWXG | constants.S_IRWXO);" ]
[ 9, 60, 77, 85, 86, 152, 177, 178, 192, 227, 288, 290 ]
{ "additions": 114, "author": "marco-ippolito", "deletions": 22, "html_url": "https://github.com/nodejs/node/pull/57171", "issue_id": 57171, "merged_at": "2025-02-27T21:02:10Z", "omission_probability": 0.1, "pr_number": 57171, "repo": "nodejs/node", "title": "src: set default config as node.config.json", "total_changes": 136 }
851
diff --git a/src/api/encoding.cc b/src/api/encoding.cc index 52f41980507bcb..215db8882bf1ee 100644 --- a/src/api/encoding.cc +++ b/src/api/encoding.cc @@ -144,7 +144,7 @@ MaybeLocal<Value> TryEncode(Isolate* isolate, } MaybeLocal<Value> TryEncode(Isolate* isolate, const uint16_t* buf, size_t len) { - return StringBytes::Encode(isolate, buf, len).ToLocalChecked(); + return StringBytes::Encode(isolate, buf, len); } Local<Value> Encode(Isolate* isolate, diff --git a/src/api/environment.cc b/src/api/environment.cc index af5c62f64542a0..b9d619b2abfc85 100644 --- a/src/api/environment.cc +++ b/src/api/environment.cc @@ -539,8 +539,11 @@ MaybeLocal<Value> LoadEnvironment(Environment* env, return LoadEnvironment( env, [&](const StartExecutionCallbackInfo& info) -> MaybeLocal<Value> { - Local<Value> main_script = - ToV8Value(env->context(), main_script_source_utf8).ToLocalChecked(); + Local<Value> main_script; + if (!ToV8Value(env->context(), main_script_source_utf8) + .ToLocal(&main_script)) { + return {}; + } return info.run_cjs->Call( env->context(), Null(env->isolate()), 1, &main_script); }, diff --git a/src/js_stream.cc b/src/js_stream.cc index 55cbdf5bf2b0a3..cf04e5ef757593 100644 --- a/src/js_stream.cc +++ b/src/js_stream.cc @@ -117,10 +117,13 @@ int JSStream::DoWrite(WriteWrap* w, HandleScope scope(env()->isolate()); Context::Scope context_scope(env()->context()); + int value_int = UV_EPROTO; + MaybeStackBuffer<Local<Value>, 16> bufs_arr(count); for (size_t i = 0; i < count; i++) { - bufs_arr[i] = - Buffer::Copy(env(), bufs[i].base, bufs[i].len).ToLocalChecked(); + if (!Buffer::Copy(env(), bufs[i].base, bufs[i].len).ToLocal(&bufs_arr[i])) { + return value_int; + } } Local<Value> argv[] = { @@ -130,7 +133,6 @@ int JSStream::DoWrite(WriteWrap* w, TryCatchScope try_catch(env()); Local<Value> value; - int value_int = UV_EPROTO; if (!MakeCallback(env()->onwrite_string(), arraysize(argv), argv).ToLocal(&value) || diff --git a/src/js_udp_wrap.cc b/src/js_udp_wrap.cc index 
a4f183025df4be..51e4f8c45ffd38 100644 --- a/src/js_udp_wrap.cc +++ b/src/js_udp_wrap.cc @@ -99,8 +99,9 @@ ssize_t JSUDPWrap::Send(uv_buf_t* bufs, MaybeStackBuffer<Local<Value>, 16> buffers(nbufs); for (size_t i = 0; i < nbufs; i++) { - buffers[i] = Buffer::Copy(env(), bufs[i].base, bufs[i].len) - .ToLocalChecked(); + if (!Buffer::Copy(env(), bufs[i].base, bufs[i].len).ToLocal(&buffers[i])) { + return value_int; + } total_len += bufs[i].len; } diff --git a/src/node_contextify.cc b/src/node_contextify.cc index af05a2ca3e9208..f6b33097735b2a 100644 --- a/src/node_contextify.cc +++ b/src/node_contextify.cc @@ -1363,7 +1363,12 @@ bool ContextifyScript::EvalMachine(Local<Context> context, return false; } - args.GetReturnValue().Set(result.ToLocalChecked()); + // We checked for res being empty previously so this is a bit redundant + // but still safer than using ToLocalChecked. + Local<Value> res; + if (!result.ToLocal(&res)) return false; + + args.GetReturnValue().Set(res); return true; } diff --git a/src/node_http_common.h b/src/node_http_common.h index b58e8cb5607aba..e2e13e9972f9ce 100644 --- a/src/node_http_common.h +++ b/src/node_http_common.h @@ -414,8 +414,11 @@ class NgRcBufPointer : public MemoryRetainer { const char* header_name = reinterpret_cast<const char*>(ptr.data()); v8::Eternal<v8::String>& eternal = static_str_map[header_name]; if (eternal.IsEmpty()) { - v8::Local<v8::String> str = - GetInternalizedString(env, ptr).ToLocalChecked(); + v8::Local<v8::String> str; + if (!GetInternalizedString(env, ptr).ToLocal(&str)) { + ptr.reset(); + return {}; + } eternal.Set(env->isolate(), str); return str; } diff --git a/src/pipe_wrap.cc b/src/pipe_wrap.cc index 38a08432616e9b..2cb61215604047 100644 --- a/src/pipe_wrap.cc +++ b/src/pipe_wrap.cc @@ -53,10 +53,12 @@ MaybeLocal<Object> PipeWrap::Instantiate(Environment* env, EscapableHandleScope handle_scope(env->isolate()); AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(parent); CHECK_EQ(false, 
env->pipe_constructor_template().IsEmpty()); - Local<Function> constructor = env->pipe_constructor_template() - ->GetFunction(env->context()) - .ToLocalChecked(); - CHECK_EQ(false, constructor.IsEmpty()); + Local<Function> constructor; + if (!env->pipe_constructor_template() + ->GetFunction(env->context()) + .ToLocal(&constructor)) { + return {}; + } Local<Value> type_value = Int32::New(env->isolate(), type); return handle_scope.EscapeMaybe( constructor->NewInstance(env->context(), 1, &type_value)); diff --git a/src/tcp_wrap.cc b/src/tcp_wrap.cc index 7b38b51d381cc1..72e2843636ca3b 100644 --- a/src/tcp_wrap.cc +++ b/src/tcp_wrap.cc @@ -59,10 +59,12 @@ MaybeLocal<Object> TCPWrap::Instantiate(Environment* env, EscapableHandleScope handle_scope(env->isolate()); AsyncHooks::DefaultTriggerAsyncIdScope trigger_scope(parent); CHECK_EQ(env->tcp_constructor_template().IsEmpty(), false); - Local<Function> constructor = env->tcp_constructor_template() - ->GetFunction(env->context()) - .ToLocalChecked(); - CHECK_EQ(constructor.IsEmpty(), false); + Local<Function> constructor; + if (!env->tcp_constructor_template() + ->GetFunction(env->context()) + .ToLocal(&constructor)) { + return {}; + } Local<Value> type_value = Int32::New(env->isolate(), type); return handle_scope.EscapeMaybe( constructor->NewInstance(env->context(), 1, &type_value)); diff --git a/src/util.cc b/src/util.cc index 17afbec8be3b3c..c8b468443c23b3 100644 --- a/src/util.cc +++ b/src/util.cc @@ -811,8 +811,11 @@ v8::Maybe<int32_t> GetValidatedFd(Environment* env, const bool is_out_of_range = fd < 0 || fd > INT32_MAX; if (is_out_of_range || !IsSafeJsInt(input)) { - Utf8Value utf8_value( - env->isolate(), input->ToDetailString(env->context()).ToLocalChecked()); + Local<String> str; + if (!input->ToDetailString(env->context()).ToLocal(&str)) { + return v8::Nothing<int32_t>(); + } + Utf8Value utf8_value(env->isolate(), str); if (is_out_of_range && !std::isinf(fd)) { THROW_ERR_OUT_OF_RANGE(env, "The value of \"fd\" is 
out of range. "
diff --git a/src/api/encoding.cc b/src/api/encoding.cc index 52f41980507bcb..215db8882bf1ee 100644 --- a/src/api/encoding.cc +++ b/src/api/encoding.cc @@ -144,7 +144,7 @@ MaybeLocal<Value> TryEncode(Isolate* isolate, MaybeLocal<Value> TryEncode(Isolate* isolate, const uint16_t* buf, size_t len) { - return StringBytes::Encode(isolate, buf, len).ToLocalChecked(); + return StringBytes::Encode(isolate, buf, len); Local<Value> Encode(Isolate* isolate, diff --git a/src/api/environment.cc b/src/api/environment.cc index af5c62f64542a0..b9d619b2abfc85 100644 --- a/src/api/environment.cc +++ b/src/api/environment.cc @@ -539,8 +539,11 @@ MaybeLocal<Value> LoadEnvironment(Environment* env, return LoadEnvironment( env, [&](const StartExecutionCallbackInfo& info) -> MaybeLocal<Value> { - Local<Value> main_script = - ToV8Value(env->context(), main_script_source_utf8).ToLocalChecked(); + Local<Value> main_script; + if (!ToV8Value(env->context(), main_script_source_utf8) + .ToLocal(&main_script)) { + return {}; + } return info.run_cjs->Call( env->context(), Null(env->isolate()), 1, &main_script); }, diff --git a/src/js_stream.cc b/src/js_stream.cc index 55cbdf5bf2b0a3..cf04e5ef757593 100644 --- a/src/js_stream.cc +++ b/src/js_stream.cc @@ -117,10 +117,13 @@ int JSStream::DoWrite(WriteWrap* w, HandleScope scope(env()->isolate()); Context::Scope context_scope(env()->context()); MaybeStackBuffer<Local<Value>, 16> bufs_arr(count); for (size_t i = 0; i < count; i++) { - bufs_arr[i] = - Buffer::Copy(env(), bufs[i].base, bufs[i].len).ToLocalChecked(); Local<Value> argv[] = { @@ -130,7 +133,6 @@ int JSStream::DoWrite(WriteWrap* w, TryCatchScope try_catch(env()); Local<Value> value; - int value_int = UV_EPROTO; if (!MakeCallback(env()->onwrite_string(), arraysize(argv), argv).ToLocal(&value) || diff --git a/src/js_udp_wrap.cc b/src/js_udp_wrap.cc index a4f183025df4be..51e4f8c45ffd38 100644 --- a/src/js_udp_wrap.cc +++ b/src/js_udp_wrap.cc @@ -99,8 +99,9 @@ ssize_t JSUDPWrap::Send(uv_buf_t* 
bufs, MaybeStackBuffer<Local<Value>, 16> buffers(nbufs); for (size_t i = 0; i < nbufs; i++) { - buffers[i] = Buffer::Copy(env(), bufs[i].base, bufs[i].len) - .ToLocalChecked(); + if (!Buffer::Copy(env(), bufs[i].base, bufs[i].len).ToLocal(&buffers[i])) { total_len += bufs[i].len; diff --git a/src/node_contextify.cc b/src/node_contextify.cc index af05a2ca3e9208..f6b33097735b2a 100644 --- a/src/node_contextify.cc +++ b/src/node_contextify.cc @@ -1363,7 +1363,12 @@ bool ContextifyScript::EvalMachine(Local<Context> context, return false; - args.GetReturnValue().Set(result.ToLocalChecked()); + // We checked for res being empty previously so this is a bit redundant + // but still safer than using ToLocalChecked. + Local<Value> res; + if (!result.ToLocal(&res)) return false; return true; diff --git a/src/node_http_common.h b/src/node_http_common.h index b58e8cb5607aba..e2e13e9972f9ce 100644 --- a/src/node_http_common.h +++ b/src/node_http_common.h @@ -414,8 +414,11 @@ class NgRcBufPointer : public MemoryRetainer { const char* header_name = reinterpret_cast<const char*>(ptr.data()); v8::Eternal<v8::String>& eternal = static_str_map[header_name]; if (eternal.IsEmpty()) { - v8::Local<v8::String> str = - GetInternalizedString(env, ptr).ToLocalChecked(); + v8::Local<v8::String> str; + if (!GetInternalizedString(env, ptr).ToLocal(&str)) { + ptr.reset(); + return {}; + } eternal.Set(env->isolate(), str); return str; } diff --git a/src/pipe_wrap.cc b/src/pipe_wrap.cc index 38a08432616e9b..2cb61215604047 100644 --- a/src/pipe_wrap.cc +++ b/src/pipe_wrap.cc @@ -53,10 +53,12 @@ MaybeLocal<Object> PipeWrap::Instantiate(Environment* env, CHECK_EQ(false, env->pipe_constructor_template().IsEmpty()); - Local<Function> constructor = env->pipe_constructor_template() - CHECK_EQ(false, constructor.IsEmpty()); + if (!env->pipe_constructor_template() diff --git a/src/tcp_wrap.cc b/src/tcp_wrap.cc index 7b38b51d381cc1..72e2843636ca3b 100644 --- a/src/tcp_wrap.cc +++ b/src/tcp_wrap.cc @@ -59,10 
+59,12 @@ MaybeLocal<Object> TCPWrap::Instantiate(Environment* env, CHECK_EQ(env->tcp_constructor_template().IsEmpty(), false); - Local<Function> constructor = env->tcp_constructor_template() - CHECK_EQ(constructor.IsEmpty(), false); + if (!env->tcp_constructor_template() diff --git a/src/util.cc b/src/util.cc index 17afbec8be3b3c..c8b468443c23b3 100644 --- a/src/util.cc +++ b/src/util.cc @@ -811,8 +811,11 @@ v8::Maybe<int32_t> GetValidatedFd(Environment* env, const bool is_out_of_range = fd < 0 || fd > INT32_MAX; if (is_out_of_range || !IsSafeJsInt(input)) { - Utf8Value utf8_value( - env->isolate(), input->ToDetailString(env->context()).ToLocalChecked()); + Local<String> str; + if (!input->ToDetailString(env->context()).ToLocal(&str)) { + return v8::Nothing<int32_t>(); + Utf8Value utf8_value(env->isolate(), str); if (is_out_of_range && !std::isinf(fd)) { THROW_ERR_OUT_OF_RANGE(env, "The value of \"fd\" is out of range. "
[ "+ int value_int = UV_EPROTO;", "+ if (!Buffer::Copy(env(), bufs[i].base, bufs[i].len).ToLocal(&bufs_arr[i])) {", "+ args.GetReturnValue().Set(res);" ]
[ 39, 45, 89 ]
{ "additions": 42, "author": "jasnell", "deletions": 21, "html_url": "https://github.com/nodejs/node/pull/57852", "issue_id": 57852, "merged_at": "2025-04-21T03:01:30Z", "omission_probability": 0.1, "pr_number": 57852, "repo": "nodejs/node", "title": "src: fixup errorhandling more in various places", "total_changes": 63 }
852
diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 1edea0afdf372a..0f077ed7981484 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -291,6 +291,15 @@ added: v23.11.0 * {boolean} Whether the database is currently open or not. +### `database.isTransaction` + +<!-- YAML +added: REPLACEME +--> + +* {boolean} Whether the database is currently within a transaction. This method + is a wrapper around [`sqlite3_get_autocommit()`][]. + ### `database.open()` <!-- YAML @@ -839,6 +848,7 @@ resolution handler passed to [`database.applyChangeset()`][]. See also [`sqlite3_create_window_function()`]: https://www.sqlite.org/c3ref/create_function.html [`sqlite3_exec()`]: https://www.sqlite.org/c3ref/exec.html [`sqlite3_expanded_sql()`]: https://www.sqlite.org/c3ref/expanded_sql.html +[`sqlite3_get_autocommit()`]: https://sqlite.org/c3ref/get_autocommit.html [`sqlite3_last_insert_rowid()`]: https://www.sqlite.org/c3ref/last_insert_rowid.html [`sqlite3_load_extension()`]: https://www.sqlite.org/c3ref/load_extension.html [`sqlite3_prepare_v2()`]: https://www.sqlite.org/c3ref/prepare.html diff --git a/src/node_sqlite.cc b/src/node_sqlite.cc index 3127c29462ac6a..e5ee60a152ce8c 100644 --- a/src/node_sqlite.cc +++ b/src/node_sqlite.cc @@ -979,6 +979,15 @@ void DatabaseSync::IsOpenGetter(const FunctionCallbackInfo<Value>& args) { args.GetReturnValue().Set(db->IsOpen()); } +void DatabaseSync::IsTransactionGetter( + const FunctionCallbackInfo<Value>& args) { + DatabaseSync* db; + ASSIGN_OR_RETURN_UNWRAP(&db, args.This()); + Environment* env = Environment::GetCurrent(args); + THROW_AND_RETURN_ON_BAD_STATE(env, !db->IsOpen(), "database is not open"); + args.GetReturnValue().Set(sqlite3_get_autocommit(db->connection_) == 0); +} + void DatabaseSync::Close(const FunctionCallbackInfo<Value>& args) { DatabaseSync* db; ASSIGN_OR_RETURN_UNWRAP(&db, args.This()); @@ -2623,6 +2632,10 @@ static void Initialize(Local<Object> target, db_tmpl, FIXED_ONE_BYTE_STRING(isolate, "isOpen"), 
DatabaseSync::IsOpenGetter); + SetSideEffectFreeGetter(isolate, + db_tmpl, + FIXED_ONE_BYTE_STRING(isolate, "isTransaction"), + DatabaseSync::IsTransactionGetter); SetConstructorFunction(context, target, "DatabaseSync", db_tmpl); SetConstructorFunction(context, target, diff --git a/src/node_sqlite.h b/src/node_sqlite.h index dee4144d40028a..1a5704b7f7799b 100644 --- a/src/node_sqlite.h +++ b/src/node_sqlite.h @@ -61,6 +61,8 @@ class DatabaseSync : public BaseObject { static void New(const v8::FunctionCallbackInfo<v8::Value>& args); static void Open(const v8::FunctionCallbackInfo<v8::Value>& args); static void IsOpenGetter(const v8::FunctionCallbackInfo<v8::Value>& args); + static void IsTransactionGetter( + const v8::FunctionCallbackInfo<v8::Value>& args); static void Close(const v8::FunctionCallbackInfo<v8::Value>& args); static void Prepare(const v8::FunctionCallbackInfo<v8::Value>& args); static void Exec(const v8::FunctionCallbackInfo<v8::Value>& args); diff --git a/test/parallel/test-sqlite-database-sync.js b/test/parallel/test-sqlite-database-sync.js index 3ae726004a68d6..9f707bbb21acb1 100644 --- a/test/parallel/test-sqlite-database-sync.js +++ b/test/parallel/test-sqlite-database-sync.js @@ -324,3 +324,40 @@ suite('DatabaseSync.prototype.exec()', () => { }); }); }); + +suite('DatabaseSync.prototype.isTransaction', () => { + test('correctly detects a committed transaction', (t) => { + const db = new DatabaseSync(':memory:'); + + t.assert.strictEqual(db.isTransaction, false); + db.exec('BEGIN'); + t.assert.strictEqual(db.isTransaction, true); + db.exec('CREATE TABLE foo (id INTEGER PRIMARY KEY)'); + t.assert.strictEqual(db.isTransaction, true); + db.exec('COMMIT'); + t.assert.strictEqual(db.isTransaction, false); + }); + + test('correctly detects a rolled back transaction', (t) => { + const db = new DatabaseSync(':memory:'); + + t.assert.strictEqual(db.isTransaction, false); + db.exec('BEGIN'); + t.assert.strictEqual(db.isTransaction, true); + db.exec('CREATE 
TABLE foo (id INTEGER PRIMARY KEY)'); + t.assert.strictEqual(db.isTransaction, true); + db.exec('ROLLBACK'); + t.assert.strictEqual(db.isTransaction, false); + }); + + test('throws if database is not open', (t) => { + const db = new DatabaseSync(nextDb(), { open: false }); + + t.assert.throws(() => { + return db.isTransaction; + }, { + code: 'ERR_INVALID_STATE', + message: /database is not open/, + }); + }); +});
diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 1edea0afdf372a..0f077ed7981484 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -291,6 +291,15 @@ added: v23.11.0 * {boolean} Whether the database is currently open or not. +### `database.isTransaction` +<!-- YAML +added: REPLACEME +--> +* {boolean} Whether the database is currently within a transaction. This method + is a wrapper around [`sqlite3_get_autocommit()`][]. ### `database.open()` <!-- YAML @@ -839,6 +848,7 @@ resolution handler passed to [`database.applyChangeset()`][]. See also [`sqlite3_create_window_function()`]: https://www.sqlite.org/c3ref/create_function.html [`sqlite3_exec()`]: https://www.sqlite.org/c3ref/exec.html [`sqlite3_expanded_sql()`]: https://www.sqlite.org/c3ref/expanded_sql.html +[`sqlite3_get_autocommit()`]: https://sqlite.org/c3ref/get_autocommit.html [`sqlite3_last_insert_rowid()`]: https://www.sqlite.org/c3ref/last_insert_rowid.html [`sqlite3_load_extension()`]: https://www.sqlite.org/c3ref/load_extension.html [`sqlite3_prepare_v2()`]: https://www.sqlite.org/c3ref/prepare.html diff --git a/src/node_sqlite.cc b/src/node_sqlite.cc index 3127c29462ac6a..e5ee60a152ce8c 100644 --- a/src/node_sqlite.cc +++ b/src/node_sqlite.cc @@ -979,6 +979,15 @@ void DatabaseSync::IsOpenGetter(const FunctionCallbackInfo<Value>& args) { args.GetReturnValue().Set(db->IsOpen()); } +void DatabaseSync::IsTransactionGetter( + DatabaseSync* db; + THROW_AND_RETURN_ON_BAD_STATE(env, !db->IsOpen(), "database is not open"); +} void DatabaseSync::Close(const FunctionCallbackInfo<Value>& args) { DatabaseSync* db; ASSIGN_OR_RETURN_UNWRAP(&db, args.This()); @@ -2623,6 +2632,10 @@ static void Initialize(Local<Object> target, db_tmpl, FIXED_ONE_BYTE_STRING(isolate, "isOpen"), DatabaseSync::IsOpenGetter); + SetSideEffectFreeGetter(isolate, + db_tmpl, + FIXED_ONE_BYTE_STRING(isolate, "isTransaction"), + DatabaseSync::IsTransactionGetter); SetConstructorFunction(context, target, "DatabaseSync", db_tmpl); 
SetConstructorFunction(context, target, diff --git a/src/node_sqlite.h b/src/node_sqlite.h index dee4144d40028a..1a5704b7f7799b 100644 --- a/src/node_sqlite.h +++ b/src/node_sqlite.h @@ -61,6 +61,8 @@ class DatabaseSync : public BaseObject { static void New(const v8::FunctionCallbackInfo<v8::Value>& args); static void Open(const v8::FunctionCallbackInfo<v8::Value>& args); static void IsOpenGetter(const v8::FunctionCallbackInfo<v8::Value>& args); + static void IsTransactionGetter( + const v8::FunctionCallbackInfo<v8::Value>& args); static void Close(const v8::FunctionCallbackInfo<v8::Value>& args); static void Prepare(const v8::FunctionCallbackInfo<v8::Value>& args); static void Exec(const v8::FunctionCallbackInfo<v8::Value>& args); diff --git a/test/parallel/test-sqlite-database-sync.js b/test/parallel/test-sqlite-database-sync.js index 3ae726004a68d6..9f707bbb21acb1 100644 --- a/test/parallel/test-sqlite-database-sync.js +++ b/test/parallel/test-sqlite-database-sync.js @@ -324,3 +324,40 @@ suite('DatabaseSync.prototype.exec()', () => { }); }); }); +suite('DatabaseSync.prototype.isTransaction', () => { + test('correctly detects a committed transaction', (t) => { + db.exec('COMMIT'); + test('correctly detects a rolled back transaction', (t) => { + db.exec('ROLLBACK'); + test('throws if database is not open', (t) => { + const db = new DatabaseSync(nextDb(), { open: false }); + return db.isTransaction; + }, { + code: 'ERR_INVALID_STATE', + message: /database is not open/, + }); +});
[ "+ const FunctionCallbackInfo<Value>& args) {", "+ ASSIGN_OR_RETURN_UNWRAP(&db, args.This());", "+ Environment* env = Environment::GetCurrent(args);", "+ args.GetReturnValue().Set(sqlite3_get_autocommit(db->connection_) == 0);", "+ t.assert.throws(() => {" ]
[ 37, 39, 40, 42, 109 ]
{ "additions": 62, "author": "cjihrig", "deletions": 0, "html_url": "https://github.com/nodejs/node/pull/57925", "issue_id": 57925, "merged_at": "2025-04-20T15:57:42Z", "omission_probability": 0.1, "pr_number": 57925, "repo": "nodejs/node", "title": "sqlite: add getter to detect transactions", "total_changes": 62 }
853
diff --git a/lib/internal/util.js b/lib/internal/util.js index e690d7f4323bcc..7a928adb71c043 100644 --- a/lib/internal/util.js +++ b/lib/internal/util.js @@ -8,6 +8,7 @@ const { Error, ErrorCaptureStackTrace, FunctionPrototypeCall, + NumberParseInt, ObjectDefineProperties, ObjectDefineProperty, ObjectFreeze, @@ -33,7 +34,9 @@ const { SafeSet, SafeWeakMap, SafeWeakRef, + StringPrototypeIncludes, StringPrototypeReplace, + StringPrototypeSlice, StringPrototypeToLowerCase, StringPrototypeToUpperCase, Symbol, @@ -795,6 +798,59 @@ function setupCoverageHooks(dir) { return coverageDirectory; } +// Returns the number of ones in the binary representation of the decimal +// number. +function countBinaryOnes(n) { + // Count the number of bits set in parallel, which is faster than looping + n = n - ((n >>> 1) & 0x55555555); + n = (n & 0x33333333) + ((n >>> 2) & 0x33333333); + return ((n + (n >>> 4) & 0xF0F0F0F) * 0x1010101) >>> 24; +} + +function getCIDR(address, netmask, family) { + let ones = 0; + let split = '.'; + let range = 10; + let groupLength = 8; + let hasZeros = false; + let lastPos = 0; + + if (family === 'IPv6') { + split = ':'; + range = 16; + groupLength = 16; + } + + for (let i = 0; i < netmask.length; i++) { + if (netmask[i] !== split) { + if (i + 1 < netmask.length) { + continue; + } + i++; + } + const part = StringPrototypeSlice(netmask, lastPos, i); + lastPos = i + 1; + if (part !== '') { + if (hasZeros) { + if (part !== '0') { + return null; + } + } else { + const binary = NumberParseInt(part, range); + const binaryOnes = countBinaryOnes(binary); + ones += binaryOnes; + if (binaryOnes !== groupLength) { + if (StringPrototypeIncludes(binary.toString(2), '01')) { + return null; + } + hasZeros = true; + } + } + } + } + + return `${address}/${ones}`; +} const handleTypes = ['TCP', 'TTY', 'UDP', 'FILE', 'PIPE', 'UNKNOWN']; function guessHandleType(fd) { @@ -861,6 +917,7 @@ module.exports = { filterDuplicateStrings, filterOwnProperties, getConstructorOf, + 
getCIDR, getCWDURL, getInternalGlobal, getStructuredStack, diff --git a/lib/os.js b/lib/os.js index 4f8dda1531b5dc..c44147f0e1170d 100644 --- a/lib/os.js +++ b/lib/os.js @@ -24,7 +24,6 @@ const { ArrayPrototypePush, Float64Array, - NumberParseInt, ObjectDefineProperties, StringPrototypeSlice, SymbolToPrimitive, @@ -40,6 +39,7 @@ const { }, hideStackFrames, } = require('internal/errors'); +const { getCIDR } = require('internal/util'); const { validateInt32 } = require('internal/validators'); const { @@ -202,60 +202,6 @@ function endianness() { } endianness[SymbolToPrimitive] = () => kEndianness; -// Returns the number of ones in the binary representation of the decimal -// number. -function countBinaryOnes(n) { - // Count the number of bits set in parallel, which is faster than looping - n = n - ((n >>> 1) & 0x55555555); - n = (n & 0x33333333) + ((n >>> 2) & 0x33333333); - return ((n + (n >>> 4) & 0xF0F0F0F) * 0x1010101) >>> 24; -} - -function getCIDR(address, netmask, family) { - let ones = 0; - let split = '.'; - let range = 10; - let groupLength = 8; - let hasZeros = false; - let lastPos = 0; - - if (family === 'IPv6') { - split = ':'; - range = 16; - groupLength = 16; - } - - for (let i = 0; i < netmask.length; i++) { - if (netmask[i] !== split) { - if (i + 1 < netmask.length) { - continue; - } - i++; - } - const part = StringPrototypeSlice(netmask, lastPos, i); - lastPos = i + 1; - if (part !== '') { - if (hasZeros) { - if (part !== '0') { - return null; - } - } else { - const binary = NumberParseInt(part, range); - const binaryOnes = countBinaryOnes(binary); - ones += binaryOnes; - if (binaryOnes !== groupLength) { - if ((binary & 1) !== 0) { - return null; - } - hasZeros = true; - } - } - } - } - - return `${address}/${ones}`; -} - /** * @returns {Record<string, Array<{ * address: string, diff --git a/test/parallel/test-internal-util-getCIDR.js b/test/parallel/test-internal-util-getCIDR.js new file mode 100644 index 00000000000000..f7dd3c1194b6a2 --- 
/dev/null +++ b/test/parallel/test-internal-util-getCIDR.js @@ -0,0 +1,23 @@ +// Flags: --expose-internals +'use strict'; +require('../common'); + +// These are tests that verify that the subnetmask is used +// to create the correct CIDR address. +// Tests that it returns null if the subnetmask is not in the correct format. +// (ref: https://www.rfc-editor.org/rfc/rfc1878) + +const assert = require('node:assert'); +const { getCIDR } = require('internal/util'); + +assert.strictEqual(getCIDR('127.0.0.1', '255.0.0.0', 'IPv4'), '127.0.0.1/8'); +assert.strictEqual(getCIDR('127.0.0.1', '255.255.0.0', 'IPv4'), '127.0.0.1/16'); + +// 242 = 11110010(2) +assert.strictEqual(getCIDR('127.0.0.1', '242.0.0.0', 'IPv4'), null); + +assert.strictEqual(getCIDR('::1', 'ffff:ffff:ffff:ffff::', 'IPv6'), '::1/64'); +assert.strictEqual(getCIDR('::1', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', 'IPv6'), '::1/128'); + +// ff00:ffff = 11111111 00000000 : 11111111 11111111(2) +assert.strictEqual(getCIDR('::1', 'ffff:ff00:ffff::', 'IPv6'), null);
diff --git a/lib/internal/util.js b/lib/internal/util.js index e690d7f4323bcc..7a928adb71c043 100644 --- a/lib/internal/util.js +++ b/lib/internal/util.js @@ -8,6 +8,7 @@ const { Error, ErrorCaptureStackTrace, FunctionPrototypeCall, + NumberParseInt, ObjectDefineProperty, ObjectFreeze, @@ -33,7 +34,9 @@ const { SafeSet, SafeWeakMap, SafeWeakRef, + StringPrototypeIncludes, StringPrototypeReplace, + StringPrototypeSlice, StringPrototypeToLowerCase, StringPrototypeToUpperCase, Symbol, @@ -795,6 +798,59 @@ function setupCoverageHooks(dir) { return coverageDirectory; +// Returns the number of ones in the binary representation of the decimal +// number. +function countBinaryOnes(n) { + // Count the number of bits set in parallel, which is faster than looping + n = n - ((n >>> 1) & 0x55555555); + n = (n & 0x33333333) + ((n >>> 2) & 0x33333333); + return ((n + (n >>> 4) & 0xF0F0F0F) * 0x1010101) >>> 24; +function getCIDR(address, netmask, family) { + let range = 10; + let groupLength = 8; + let hasZeros = false; + let lastPos = 0; + if (family === 'IPv6') { + split = ':'; + range = 16; + groupLength = 16; + for (let i = 0; i < netmask.length; i++) { + if (netmask[i] !== split) { + if (i + 1 < netmask.length) { + continue; + i++; + const part = StringPrototypeSlice(netmask, lastPos, i); + lastPos = i + 1; + if (hasZeros) { + if (part !== '0') { + return null; + const binary = NumberParseInt(part, range); + const binaryOnes = countBinaryOnes(binary); + ones += binaryOnes; + if (binaryOnes !== groupLength) { + if (StringPrototypeIncludes(binary.toString(2), '01')) { + return null; + } + hasZeros = true; + return `${address}/${ones}`; const handleTypes = ['TCP', 'TTY', 'UDP', 'FILE', 'PIPE', 'UNKNOWN']; function guessHandleType(fd) { @@ -861,6 +917,7 @@ module.exports = { filterDuplicateStrings, filterOwnProperties, getConstructorOf, + getCIDR, getCWDURL, getInternalGlobal, getStructuredStack, diff --git a/lib/os.js b/lib/os.js index 4f8dda1531b5dc..c44147f0e1170d 100644 --- 
a/lib/os.js +++ b/lib/os.js @@ -24,7 +24,6 @@ ArrayPrototypePush, Float64Array, - NumberParseInt, StringPrototypeSlice, SymbolToPrimitive, @@ -40,6 +39,7 @@ const { }, hideStackFrames, } = require('internal/errors'); const { validateInt32 } = require('internal/validators'); @@ -202,60 +202,6 @@ function endianness() { endianness[SymbolToPrimitive] = () => kEndianness; -// Returns the number of ones in the binary representation of the decimal -// number. -function countBinaryOnes(n) { - // Count the number of bits set in parallel, which is faster than looping - n = n - ((n >>> 1) & 0x55555555); - n = (n & 0x33333333) + ((n >>> 2) & 0x33333333); -function getCIDR(address, netmask, family) { - let ones = 0; - let split = '.'; - let range = 10; - let groupLength = 8; - let hasZeros = false; - if (family === 'IPv6') { - split = ':'; - range = 16; - groupLength = 16; - if (netmask[i] !== split) { - if (i + 1 < netmask.length) { - continue; - i++; - const part = StringPrototypeSlice(netmask, lastPos, i); - lastPos = i + 1; - if (part !== '') { - if (hasZeros) { - if (part !== '0') { - return null; - } else { - const binary = NumberParseInt(part, range); - const binaryOnes = countBinaryOnes(binary); - ones += binaryOnes; - if (binaryOnes !== groupLength) { - if ((binary & 1) !== 0) { - return null; - } - hasZeros = true; - return `${address}/${ones}`; /** * @returns {Record<string, Array<{ * address: string, diff --git a/test/parallel/test-internal-util-getCIDR.js b/test/parallel/test-internal-util-getCIDR.js new file mode 100644 index 00000000000000..f7dd3c1194b6a2 --- /dev/null +++ b/test/parallel/test-internal-util-getCIDR.js @@ -0,0 +1,23 @@ +// Flags: --expose-internals +require('../common'); +// These are tests that verify that the subnetmask is used +// to create the correct CIDR address. +// Tests that it returns null if the subnetmask is not in the correct format. 
+// (ref: https://www.rfc-editor.org/rfc/rfc1878) +const assert = require('node:assert'); +assert.strictEqual(getCIDR('127.0.0.1', '255.0.0.0', 'IPv4'), '127.0.0.1/8'); +assert.strictEqual(getCIDR('127.0.0.1', '255.255.0.0', 'IPv4'), '127.0.0.1/16'); +// 242 = 11110010(2) +assert.strictEqual(getCIDR('127.0.0.1', '242.0.0.0', 'IPv4'), null); +assert.strictEqual(getCIDR('::1', 'ffff:ffff:ffff:ffff::', 'IPv6'), '::1/64'); +assert.strictEqual(getCIDR('::1', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', 'IPv6'), '::1/128'); +// ff00:ffff = 11111111 00000000 : 11111111 11111111(2) +assert.strictEqual(getCIDR('::1', 'ffff:ff00:ffff::', 'IPv6'), null);
[ "+ let ones = 0;", "+ let split = '.';", "+ if (part !== '') {", "+ } else {", "- return ((n + (n >>> 4) & 0xF0F0F0F) * 0x1010101) >>> 24;", "- let lastPos = 0;", "- for (let i = 0; i < netmask.length; i++) {", "+'use strict';" ]
[ 36, 37, 58, 63, 120, 129, 137, 178 ]
{ "additions": 81, "author": "HBSPS", "deletions": 55, "html_url": "https://github.com/nodejs/node/pull/57324", "issue_id": 57324, "merged_at": "2025-04-20T05:54:49Z", "omission_probability": 0.1, "pr_number": 57324, "repo": "nodejs/node", "title": "os: fix netmask format check condition in getCIDR function", "total_changes": 136 }
854
diff --git a/src/node_api.cc b/src/node_api.cc index 1638d096969826..5c85ef063ecd77 100644 --- a/src/node_api.cc +++ b/src/node_api.cc @@ -20,6 +20,52 @@ #include <cstring> #include <memory> +namespace v8impl { +static void ThrowNodeApiVersionError(node::Environment* node_env, + const char* module_name, + int32_t module_api_version) { + std::string error_message; + error_message += module_name; + error_message += " requires Node-API version "; + error_message += std::to_string(module_api_version); + error_message += ", but this version of Node.js only supports version "; + error_message += NODE_STRINGIFY(NODE_API_SUPPORTED_VERSION_MAX) " add-ons."; + node_env->ThrowError(error_message.c_str()); +} +} // namespace v8impl + +/*static*/ napi_env node_napi_env__::New(v8::Local<v8::Context> context, + const std::string& module_filename, + int32_t module_api_version) { + node_napi_env result; + + // Validate module_api_version. + if (module_api_version < NODE_API_DEFAULT_MODULE_API_VERSION) { + module_api_version = NODE_API_DEFAULT_MODULE_API_VERSION; + } else if (module_api_version > NODE_API_SUPPORTED_VERSION_MAX && + module_api_version != NAPI_VERSION_EXPERIMENTAL) { + node::Environment* node_env = node::Environment::GetCurrent(context); + CHECK_NOT_NULL(node_env); + v8impl::ThrowNodeApiVersionError( + node_env, module_filename.c_str(), module_api_version); + return nullptr; + } + + result = new node_napi_env__(context, module_filename, module_api_version); + // TODO(addaleax): There was previously code that tried to delete the + // napi_env when its v8::Context was garbage collected; + // However, as long as N-API addons using this napi_env are in place, + // the Context needs to be accessible and alive. + // Ideally, we'd want an on-addon-unload hook that takes care of this + // once all N-API addons using this napi_env are unloaded. + // For now, a per-Environment cleanup hook is the best we can do. 
+ result->node_env()->AddCleanupHook( + [](void* arg) { static_cast<napi_env>(arg)->Unref(); }, + static_cast<void*>(result)); + + return result; +} + node_napi_env__::node_napi_env__(v8::Local<v8::Context> context, const std::string& module_filename, int32_t module_api_version) @@ -152,50 +198,6 @@ class BufferFinalizer : private Finalizer { ~BufferFinalizer() { env()->Unref(); } }; -void ThrowNodeApiVersionError(node::Environment* node_env, - const char* module_name, - int32_t module_api_version) { - std::string error_message; - error_message += module_name; - error_message += " requires Node-API version "; - error_message += std::to_string(module_api_version); - error_message += ", but this version of Node.js only supports version "; - error_message += NODE_STRINGIFY(NODE_API_SUPPORTED_VERSION_MAX) " add-ons."; - node_env->ThrowError(error_message.c_str()); -} - -inline napi_env NewEnv(v8::Local<v8::Context> context, - const std::string& module_filename, - int32_t module_api_version) { - node_napi_env result; - - // Validate module_api_version. - if (module_api_version < NODE_API_DEFAULT_MODULE_API_VERSION) { - module_api_version = NODE_API_DEFAULT_MODULE_API_VERSION; - } else if (module_api_version > NODE_API_SUPPORTED_VERSION_MAX && - module_api_version != NAPI_VERSION_EXPERIMENTAL) { - node::Environment* node_env = node::Environment::GetCurrent(context); - CHECK_NOT_NULL(node_env); - ThrowNodeApiVersionError( - node_env, module_filename.c_str(), module_api_version); - return nullptr; - } - - result = new node_napi_env__(context, module_filename, module_api_version); - // TODO(addaleax): There was previously code that tried to delete the - // napi_env when its v8::Context was garbage collected; - // However, as long as N-API addons using this napi_env are in place, - // the Context needs to be accessible and alive. - // Ideally, we'd want an on-addon-unload hook that takes care of this - // once all N-API addons using this napi_env are unloaded. 
- // For now, a per-Environment cleanup hook is the best we can do. - result->node_env()->AddCleanupHook( - [](void* arg) { static_cast<napi_env>(arg)->Unref(); }, - static_cast<void*>(result)); - - return result; -} - class ThreadSafeFunction : public node::AsyncResource { public: ThreadSafeFunction(v8::Local<v8::Function> func, @@ -728,7 +730,8 @@ void napi_module_register_by_symbol(v8::Local<v8::Object> exports, } // Create a new napi_env for this specific module. - napi_env env = v8impl::NewEnv(context, module_filename, module_api_version); + napi_env env = + node_napi_env__::New(context, module_filename, module_api_version); napi_value _exports = nullptr; env->CallIntoModule([&](napi_env env) { diff --git a/src/node_api_internals.h b/src/node_api_internals.h index 25f6b291902024..21d0a1d25e83e9 100644 --- a/src/node_api_internals.h +++ b/src/node_api_internals.h @@ -9,6 +9,10 @@ #include "util-inl.h" struct node_napi_env__ : public napi_env__ { + static napi_env New(v8::Local<v8::Context> context, + const std::string& module_filename, + int32_t module_api_version); + node_napi_env__(v8::Local<v8::Context> context, const std::string& module_filename, int32_t module_api_version);
diff --git a/src/node_api.cc b/src/node_api.cc index 1638d096969826..5c85ef063ecd77 100644 --- a/src/node_api.cc +++ b/src/node_api.cc @@ -20,6 +20,52 @@ #include <cstring> #include <memory> +namespace v8impl { +static void ThrowNodeApiVersionError(node::Environment* node_env, + const char* module_name, + int32_t module_api_version) { + std::string error_message; + error_message += module_name; + error_message += " requires Node-API version "; + error_message += std::to_string(module_api_version); + error_message += ", but this version of Node.js only supports version "; + error_message += NODE_STRINGIFY(NODE_API_SUPPORTED_VERSION_MAX) " add-ons."; + node_env->ThrowError(error_message.c_str()); +} // namespace v8impl +/*static*/ napi_env node_napi_env__::New(v8::Local<v8::Context> context, + int32_t module_api_version) { + node_napi_env result; + // Validate module_api_version. + if (module_api_version < NODE_API_DEFAULT_MODULE_API_VERSION) { + module_api_version = NODE_API_DEFAULT_MODULE_API_VERSION; + } else if (module_api_version > NODE_API_SUPPORTED_VERSION_MAX && + module_api_version != NAPI_VERSION_EXPERIMENTAL) { + node::Environment* node_env = node::Environment::GetCurrent(context); + CHECK_NOT_NULL(node_env); + v8impl::ThrowNodeApiVersionError( + node_env, module_filename.c_str(), module_api_version); + return nullptr; + } + result = new node_napi_env__(context, module_filename, module_api_version); + // TODO(addaleax): There was previously code that tried to delete the + // However, as long as N-API addons using this napi_env are in place, + // the Context needs to be accessible and alive. + // Ideally, we'd want an on-addon-unload hook that takes care of this + // once all N-API addons using this napi_env are unloaded. + // For now, a per-Environment cleanup hook is the best we can do. 
+ result->node_env()->AddCleanupHook( + [](void* arg) { static_cast<napi_env>(arg)->Unref(); }, + return result; node_napi_env__::node_napi_env__(v8::Local<v8::Context> context, const std::string& module_filename, int32_t module_api_version) @@ -152,50 +198,6 @@ class BufferFinalizer : private Finalizer { ~BufferFinalizer() { env()->Unref(); } }; -void ThrowNodeApiVersionError(node::Environment* node_env, - const char* module_name, - int32_t module_api_version) { - std::string error_message; - error_message += module_name; - error_message += " requires Node-API version "; - error_message += std::to_string(module_api_version); - error_message += ", but this version of Node.js only supports version "; - error_message += NODE_STRINGIFY(NODE_API_SUPPORTED_VERSION_MAX) " add-ons."; - node_env->ThrowError(error_message.c_str()); -inline napi_env NewEnv(v8::Local<v8::Context> context, - const std::string& module_filename, - int32_t module_api_version) { - node_napi_env result; - // Validate module_api_version. - if (module_api_version < NODE_API_DEFAULT_MODULE_API_VERSION) { - module_api_version = NODE_API_DEFAULT_MODULE_API_VERSION; - } else if (module_api_version > NODE_API_SUPPORTED_VERSION_MAX && - module_api_version != NAPI_VERSION_EXPERIMENTAL) { - CHECK_NOT_NULL(node_env); - node_env, module_filename.c_str(), module_api_version); - return nullptr; - } - result = new node_napi_env__(context, module_filename, module_api_version); - // TODO(addaleax): There was previously code that tried to delete the - // napi_env when its v8::Context was garbage collected; - // However, as long as N-API addons using this napi_env are in place, - // the Context needs to be accessible and alive. - // Ideally, we'd want an on-addon-unload hook that takes care of this - // once all N-API addons using this napi_env are unloaded. 
- [](void* arg) { static_cast<napi_env>(arg)->Unref(); }, - static_cast<void*>(result)); - return result; class ThreadSafeFunction : public node::AsyncResource { public: ThreadSafeFunction(v8::Local<v8::Function> func, @@ -728,7 +730,8 @@ void napi_module_register_by_symbol(v8::Local<v8::Object> exports, } // Create a new napi_env for this specific module. + napi_env env = + node_napi_env__::New(context, module_filename, module_api_version); napi_value _exports = nullptr; env->CallIntoModule([&](napi_env env) { diff --git a/src/node_api_internals.h b/src/node_api_internals.h index 25f6b291902024..21d0a1d25e83e9 100644 --- a/src/node_api_internals.h +++ b/src/node_api_internals.h @@ -9,6 +9,10 @@ #include "util-inl.h" struct node_napi_env__ : public napi_env__ { + static napi_env New(v8::Local<v8::Context> context, + int32_t module_api_version); node_napi_env__(v8::Local<v8::Context> context, const std::string& module_filename, int32_t module_api_version);
[ "+ const std::string& module_filename,", "+ // napi_env when its v8::Context was garbage collected;", "+ static_cast<void*>(result));", "- node::Environment* node_env = node::Environment::GetCurrent(context);", "- ThrowNodeApiVersionError(", "- // For now, a per-Environment cleanup hook is the best we can do.", "- result->node_env()->AddCleanupHook(", "- napi_env env = v8impl::NewEnv(context, module_filename, module_api_version);", "+ const std::string& module_filename," ]
[ 23, 41, 49, 83, 85, 97, 98, 112, 127 ]
{ "additions": 52, "author": "vmoroz", "deletions": 45, "html_url": "https://github.com/nodejs/node/pull/57834", "issue_id": 57834, "merged_at": "2025-04-19T17:03:54Z", "omission_probability": 0.1, "pr_number": 57834, "repo": "nodejs/node", "title": "node-api: convert NewEnv to node_napi_env__::New", "total_changes": 97 }
855
diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 07916addeac91a..df472d4c7899e0 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -182,11 +182,14 @@ added: * `useBigIntArguments` {boolean} If `true`, integer arguments to `function` are converted to `BigInt`s. If `false`, integer arguments are passed as JavaScript numbers. **Default:** `false`. - * `varargs` {boolean} If `true`, `function` can accept a variable number of - arguments. If `false`, `function` must be invoked with exactly - `function.length` arguments. **Default:** `false`. + * `varargs` {boolean} If `true`, `function` may be invoked with any number of + arguments (between zero and [`SQLITE_MAX_FUNCTION_ARG`][]). If `false`, + `function` must be invoked with exactly `function.length` arguments. + **Default:** `false`. * `function` {Function} The JavaScript function to call when the SQLite - function is invoked. + function is invoked. The return value of this function should be a valid + SQLite data type: see [Type conversion between JavaScript and SQLite][]. + The result defaults to `NULL` if the return value is `undefined`. This method is used to create SQLite user-defined functions. This method is a wrapper around [`sqlite3_create_function_v2()`][]. @@ -599,10 +602,12 @@ resolution handler passed to [`database.applyChangeset()`][]. 
See also [Constants Passed To The Conflict Handler]: https://www.sqlite.org/session/c_changeset_conflict.html [Constants Returned From The Conflict Handler]: https://www.sqlite.org/session/c_changeset_abort.html [SQL injection]: https://en.wikipedia.org/wiki/SQL_injection +[Type conversion between JavaScript and SQLite]: #type-conversion-between-javascript-and-sqlite [`ATTACH DATABASE`]: https://www.sqlite.org/lang_attach.html [`PRAGMA foreign_keys`]: https://www.sqlite.org/pragma.html#pragma_foreign_keys [`SQLITE_DETERMINISTIC`]: https://www.sqlite.org/c3ref/c_deterministic.html [`SQLITE_DIRECTONLY`]: https://www.sqlite.org/c3ref/c_deterministic.html +[`SQLITE_MAX_FUNCTION_ARG`]: https://www.sqlite.org/limits.html#max_function_arg [`database.applyChangeset()`]: #databaseapplychangesetchangeset-options [`sqlite3_changes64()`]: https://www.sqlite.org/c3ref/changes.html [`sqlite3_close_v2()`]: https://www.sqlite.org/c3ref/close.html
diff --git a/doc/api/sqlite.md b/doc/api/sqlite.md index 07916addeac91a..df472d4c7899e0 100644 --- a/doc/api/sqlite.md +++ b/doc/api/sqlite.md @@ -182,11 +182,14 @@ added: * `useBigIntArguments` {boolean} If `true`, integer arguments to `function` are converted to `BigInt`s. If `false`, integer arguments are passed as JavaScript numbers. **Default:** `false`. - * `varargs` {boolean} If `true`, `function` can accept a variable number of - arguments. If `false`, `function` must be invoked with exactly - `function.length` arguments. **Default:** `false`. + * `varargs` {boolean} If `true`, `function` may be invoked with any number of + **Default:** `false`. * `function` {Function} The JavaScript function to call when the SQLite - function is invoked. + function is invoked. The return value of this function should be a valid + SQLite data type: see [Type conversion between JavaScript and SQLite][]. + The result defaults to `NULL` if the return value is `undefined`. This method is used to create SQLite user-defined functions. This method is a wrapper around [`sqlite3_create_function_v2()`][]. @@ -599,10 +602,12 @@ resolution handler passed to [`database.applyChangeset()`][]. 
See also [Constants Passed To The Conflict Handler]: https://www.sqlite.org/session/c_changeset_conflict.html [Constants Returned From The Conflict Handler]: https://www.sqlite.org/session/c_changeset_abort.html [SQL injection]: https://en.wikipedia.org/wiki/SQL_injection +[Type conversion between JavaScript and SQLite]: #type-conversion-between-javascript-and-sqlite [`ATTACH DATABASE`]: https://www.sqlite.org/lang_attach.html [`PRAGMA foreign_keys`]: https://www.sqlite.org/pragma.html#pragma_foreign_keys [`SQLITE_DETERMINISTIC`]: https://www.sqlite.org/c3ref/c_deterministic.html [`SQLITE_DIRECTONLY`]: https://www.sqlite.org/c3ref/c_deterministic.html +[`SQLITE_MAX_FUNCTION_ARG`]: https://www.sqlite.org/limits.html#max_function_arg [`database.applyChangeset()`]: #databaseapplychangesetchangeset-options [`sqlite3_changes64()`]: https://www.sqlite.org/c3ref/changes.html [`sqlite3_close_v2()`]: https://www.sqlite.org/c3ref/close.html
[ "+ arguments (between zero and [`SQLITE_MAX_FUNCTION_ARG`][]). If `false`,", "+ `function` must be invoked with exactly `function.length` arguments." ]
[ 12, 13 ]
{ "additions": 9, "author": "Renegade334", "deletions": 4, "html_url": "https://github.com/nodejs/node/pull/56786", "issue_id": 56786, "merged_at": "2025-01-29T14:21:14Z", "omission_probability": 0.1, "pr_number": 56786, "repo": "nodejs/node", "title": "doc: clarify sqlite user-defined function behaviour", "total_changes": 13 }
856
diff --git a/lib/internal/util/comparisons.js b/lib/internal/util/comparisons.js index 3120c902905f1d..0a7f9af04fb009 100644 --- a/lib/internal/util/comparisons.js +++ b/lib/internal/util/comparisons.js @@ -44,6 +44,7 @@ const { isBooleanObject, isBigIntObject, isSymbolObject, + isFloat16Array, isFloat32Array, isFloat64Array, isKeyObject, @@ -245,7 +246,8 @@ function objectComparisonStart(val1, val2, mode, memos) { if (!isPartialArrayBufferView(val1, val2)) { return false; } - } else if (mode === kLoose && (isFloat32Array(val1) || isFloat64Array(val1))) { + } else if (mode === kLoose && + (isFloat32Array(val1) || isFloat64Array(val1) || isFloat16Array(val1))) { if (!areSimilarFloatArrays(val1, val2)) { return false; } diff --git a/test/parallel/test-assert-partial-deep-equal.js b/test/parallel/test-assert-partial-deep-equal.js index ce0c1e333580c9..061aa03fcb2dd9 100644 --- a/test/parallel/test-assert-partial-deep-equal.js +++ b/test/parallel/test-assert-partial-deep-equal.js @@ -1,3 +1,5 @@ +// Flags: --js-float16array +// TODO(LiviaMedeiros): once `Float16Array` is unflagged in v8, remove the line above 'use strict'; const common = require('../common'); @@ -5,6 +7,9 @@ const vm = require('node:vm'); const assert = require('node:assert'); const { describe, it } = require('node:test'); +// TODO(LiviaMedeiros): once linter recognizes `Float16Array`, remove next line +const { Float16Array } = globalThis; + const x = ['x']; function createCircularObject() { @@ -494,6 +499,11 @@ describe('Object Comparison Tests', () => { actual: { dataView: new Uint8Array(3) }, expected: { dataView: new DataView(new ArrayBuffer(3)) }, }, + { + description: 'throws when comparing Float16Array([+0.0]) with Float16Array([-0.0])', + actual: new Float16Array([+0.0]), + expected: new Float16Array([-0.0]), + }, { description: 'throws when comparing Float32Array([+0.0]) with Float32Array([-0.0])', actual: new Float32Array([+0.0]), diff --git a/test/parallel/test-assert-typedarray-deepequal.js 
b/test/parallel/test-assert-typedarray-deepequal.js index 7fb18c1886ba91..371e7161145593 100644 --- a/test/parallel/test-assert-typedarray-deepequal.js +++ b/test/parallel/test-assert-typedarray-deepequal.js @@ -1,9 +1,14 @@ +// Flags: --js-float16array +// TODO(LiviaMedeiros): once `Float16Array` is unflagged in v8, remove the line above 'use strict'; require('../common'); const assert = require('assert'); const { test, suite } = require('node:test'); +// TODO(LiviaMedeiros): once linter recognizes `Float16Array`, remove next line +const { Float16Array } = globalThis; + function makeBlock(f) { const args = Array.prototype.slice.call(arguments, 1); return function() { @@ -20,6 +25,7 @@ suite('equalArrayPairs', () => { [new Int8Array(1e5), new Int8Array(1e5)], [new Int16Array(1e5), new Int16Array(1e5)], [new Int32Array(1e5), new Int32Array(1e5)], + [new Float16Array(1e5), new Float16Array(1e5)], [new Float32Array(1e5), new Float32Array(1e5)], [new Float64Array(1e5), new Float64Array(1e5)], [new Float32Array([+0.0]), new Float32Array([+0.0])], @@ -41,6 +47,7 @@ suite('equalArrayPairs', () => { suite('looseEqualArrayPairs', () => { const looseEqualArrayPairs = [ + [new Float16Array([+0.0]), new Float16Array([-0.0])], [new Float32Array([+0.0]), new Float32Array([-0.0])], [new Float64Array([+0.0]), new Float64Array([-0.0])], ]; @@ -71,6 +78,8 @@ suite('notEqualArrayPairs', () => { [new Int16Array([0]), new Uint16Array([256])], [new Int16Array([-256]), new Uint16Array([0xff00])], // same bits [new Int32Array([-256]), new Uint32Array([0xffffff00])], // ditto + [new Float16Array([0.1]), new Float16Array([0.0])], + [new Float16Array([0.1]), new Float16Array([0.1, 0.2])], [new Float32Array([0.1]), new Float32Array([0.0])], [new Float32Array([0.1]), new Float32Array([0.1, 0.2])], [new Float64Array([0.1]), new Float64Array([0.0])],
diff --git a/lib/internal/util/comparisons.js b/lib/internal/util/comparisons.js index 3120c902905f1d..0a7f9af04fb009 100644 --- a/lib/internal/util/comparisons.js +++ b/lib/internal/util/comparisons.js @@ -44,6 +44,7 @@ const { isBooleanObject, isBigIntObject, isSymbolObject, + isFloat16Array, isFloat32Array, isFloat64Array, isKeyObject, @@ -245,7 +246,8 @@ function objectComparisonStart(val1, val2, mode, memos) { if (!isPartialArrayBufferView(val1, val2)) { - } else if (mode === kLoose && (isFloat32Array(val1) || isFloat64Array(val1))) { + } else if (mode === kLoose && + (isFloat32Array(val1) || isFloat64Array(val1) || isFloat16Array(val1))) { if (!areSimilarFloatArrays(val1, val2)) { diff --git a/test/parallel/test-assert-partial-deep-equal.js b/test/parallel/test-assert-partial-deep-equal.js index ce0c1e333580c9..061aa03fcb2dd9 100644 --- a/test/parallel/test-assert-partial-deep-equal.js +++ b/test/parallel/test-assert-partial-deep-equal.js @@ -1,3 +1,5 @@ const common = require('../common'); @@ -5,6 +7,9 @@ const vm = require('node:vm'); const assert = require('node:assert'); const { describe, it } = require('node:test'); const x = ['x']; function createCircularObject() { @@ -494,6 +499,11 @@ describe('Object Comparison Tests', () => { actual: { dataView: new Uint8Array(3) }, expected: { dataView: new DataView(new ArrayBuffer(3)) }, }, + { + description: 'throws when comparing Float16Array([+0.0]) with Float16Array([-0.0])', + actual: new Float16Array([+0.0]), + expected: new Float16Array([-0.0]), { description: 'throws when comparing Float32Array([+0.0]) with Float32Array([-0.0])', actual: new Float32Array([+0.0]), diff --git a/test/parallel/test-assert-typedarray-deepequal.js b/test/parallel/test-assert-typedarray-deepequal.js index 7fb18c1886ba91..371e7161145593 100644 --- a/test/parallel/test-assert-typedarray-deepequal.js +++ b/test/parallel/test-assert-typedarray-deepequal.js @@ -1,9 +1,14 @@ require('../common'); const assert = require('assert'); const 
{ test, suite } = require('node:test'); function makeBlock(f) { const args = Array.prototype.slice.call(arguments, 1); return function() { @@ -20,6 +25,7 @@ suite('equalArrayPairs', () => { [new Int8Array(1e5), new Int8Array(1e5)], [new Int16Array(1e5), new Int16Array(1e5)], [new Int32Array(1e5), new Int32Array(1e5)], + [new Float16Array(1e5), new Float16Array(1e5)], [new Float32Array(1e5), new Float32Array(1e5)], [new Float64Array(1e5), new Float64Array(1e5)], [new Float32Array([+0.0]), new Float32Array([+0.0])], @@ -41,6 +47,7 @@ suite('equalArrayPairs', () => { suite('looseEqualArrayPairs', () => { const looseEqualArrayPairs = [ + [new Float16Array([+0.0]), new Float16Array([-0.0])], [new Float32Array([+0.0]), new Float32Array([-0.0])], [new Float64Array([+0.0]), new Float64Array([-0.0])], ]; @@ -71,6 +78,8 @@ suite('notEqualArrayPairs', () => { [new Int16Array([0]), new Uint16Array([256])], [new Int16Array([-256]), new Uint16Array([0xff00])], // same bits [new Int32Array([-256]), new Uint32Array([0xffffff00])], // ditto + [new Float16Array([0.1]), new Float16Array([0.0])], [new Float32Array([0.1]), new Float32Array([0.0])], [new Float32Array([0.1]), new Float32Array([0.1, 0.2])], [new Float64Array([0.1]), new Float64Array([0.0])],
[ "+ },", "+ [new Float16Array([0.1]), new Float16Array([0.1, 0.2])]," ]
[ 50, 94 ]
{ "additions": 22, "author": "LiviaMedeiros", "deletions": 1, "html_url": "https://github.com/nodejs/node/pull/57881", "issue_id": 57881, "merged_at": "2025-04-19T17:04:04Z", "omission_probability": 0.1, "pr_number": 57881, "repo": "nodejs/node", "title": "assert: support `Float16Array` in loose deep equality checks", "total_changes": 23 }
857
diff --git a/lib/internal/http2/core.js b/lib/internal/http2/core.js index 554221ac614636..31c3c9e3a4fde5 100644 --- a/lib/internal/http2/core.js +++ b/lib/internal/http2/core.js @@ -1068,6 +1068,7 @@ function setupHandle(socket, type, options) { if (typeof options.selectPadding === 'function') this[kSelectPadding] = options.selectPadding; handle.consume(socket._handle); + handle.ongracefulclosecomplete = this[kMaybeDestroy].bind(this, null); this[kHandle] = handle; if (this[kNativeFields]) { @@ -1589,6 +1590,10 @@ class Http2Session extends EventEmitter { if (typeof callback === 'function') this.once('close', callback); this.goaway(); + const handle = this[kHandle]; + if (handle) { + handle.setGracefulClose(); + } this[kMaybeDestroy](); } @@ -1609,11 +1614,13 @@ class Http2Session extends EventEmitter { // * session is closed and there are no more pending or open streams [kMaybeDestroy](error) { if (error == null) { + const handle = this[kHandle]; + const hasPendingData = !!handle && handle.hasPendingData(); const state = this[kState]; // Do not destroy if we're not closed and there are pending/open streams if (!this.closed || state.streams.size > 0 || - state.pendingStreams.size > 0) { + state.pendingStreams.size > 0 || hasPendingData) { return; } } @@ -3300,7 +3307,7 @@ function socketOnClose() { state.streams.forEach((stream) => stream.close(NGHTTP2_CANCEL)); state.pendingStreams.forEach((stream) => stream.close(NGHTTP2_CANCEL)); session.close(); - session[kMaybeDestroy](err); + closeSession(session, NGHTTP2_NO_ERROR, err); } } diff --git a/src/env_properties.h b/src/env_properties.h index 6ccc581034f4b2..3e82a9b543e9db 100644 --- a/src/env_properties.h +++ b/src/env_properties.h @@ -285,6 +285,7 @@ V(onsignal_string, "onsignal") \ V(onunpipe_string, "onunpipe") \ V(onwrite_string, "onwrite") \ + V(ongracefulclosecomplete_string, "ongracefulclosecomplete") \ V(openssl_error_stack, "opensslErrorStack") \ V(options_string, "options") \ V(order_string, "order") \ 
diff --git a/src/node_http2.cc b/src/node_http2.cc index b56b0d76cc7c0b..ea05aaacab8268 100644 --- a/src/node_http2.cc +++ b/src/node_http2.cc @@ -559,7 +559,8 @@ Http2Session::Http2Session(Http2State* http2_state, : AsyncWrap(http2_state->env(), wrap, AsyncWrap::PROVIDER_HTTP2SESSION), js_fields_(http2_state->env()->isolate()), session_type_(type), - http2_state_(http2_state) { + http2_state_(http2_state), + graceful_close_initiated_(false) { MakeWeak(); statistics_.session_type = type; statistics_.start_time = uv_hrtime(); @@ -765,6 +766,24 @@ void Http2Stream::EmitStatistics() { }); } +void Http2Session::HasPendingData(const FunctionCallbackInfo<Value>& args) { + Http2Session* session; + ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); + args.GetReturnValue().Set(session->HasPendingData()); +} + +bool Http2Session::HasPendingData() const { + nghttp2_session* session = session_.get(); + int want_write = nghttp2_session_want_write(session); + // It is expected that want_read will alway be 0 if graceful + // session close is initiated and goaway frame is sent. 
+ int want_read = nghttp2_session_want_read(session); + if (want_write == 0 && want_read == 0) { + return false; + } + return true; +} + void Http2Session::EmitStatistics() { if (!HasHttp2Observer(env())) [[likely]] { return; @@ -1743,6 +1762,7 @@ void Http2Session::HandleSettingsFrame(const nghttp2_frame* frame) { void Http2Session::OnStreamAfterWrite(WriteWrap* w, int status) { Debug(this, "write finished with status %d", status); + MaybeNotifyGracefulCloseComplete(); CHECK(is_write_in_progress()); set_write_in_progress(false); @@ -1965,6 +1985,7 @@ uint8_t Http2Session::SendPendingData() { if (!res.async) { set_write_in_progress(false); ClearOutgoing(res.err); + MaybeNotifyGracefulCloseComplete(); } MaybeStopReading(); @@ -3478,6 +3499,8 @@ void Initialize(Local<Object> target, SetProtoMethod(isolate, session, "receive", Http2Session::Receive); SetProtoMethod(isolate, session, "destroy", Http2Session::Destroy); SetProtoMethod(isolate, session, "goaway", Http2Session::Goaway); + SetProtoMethod( + isolate, session, "hasPendingData", Http2Session::HasPendingData); SetProtoMethod(isolate, session, "settings", Http2Session::Settings); SetProtoMethod(isolate, session, "request", Http2Session::Request); SetProtoMethod( @@ -3498,6 +3521,8 @@ void Initialize(Local<Object> target, "remoteSettings", Http2Session::RefreshSettings<nghttp2_session_get_remote_settings, false>); + SetProtoMethod( + isolate, session, "setGracefulClose", Http2Session::SetGracefulClose); SetConstructorFunction(context, target, "Http2Session", session); Local<Object> constants = Object::New(isolate); @@ -3552,6 +3577,38 @@ void Initialize(Local<Object> target, nghttp2_set_debug_vprintf_callback(NgHttp2Debug); #endif } + +void Http2Session::SetGracefulClose(const FunctionCallbackInfo<Value>& args) { + Http2Session* session; + ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder()); + CHECK_NOT_NULL(session); + // Set the graceful close flag + session->SetGracefulCloseInitiated(true); + + Debug(session, 
"Setting graceful close initiated flag"); +} + +void Http2Session::MaybeNotifyGracefulCloseComplete() { + nghttp2_session* session = session_.get(); + + if (!IsGracefulCloseInitiated()) { + return; + } + + int want_write = nghttp2_session_want_write(session); + int want_read = nghttp2_session_want_read(session); + bool should_notify = (want_write == 0 && want_read == 0); + + if (should_notify) { + Debug(this, "Notifying JS after write in graceful close mode"); + + // Make the callback to JavaScript + HandleScope scope(env()->isolate()); + MakeCallback(env()->ongracefulclosecomplete_string(), 0, nullptr); + } + + return; +} } // namespace http2 } // namespace node diff --git a/src/node_http2.h b/src/node_http2.h index 3ba05cbe7f9ce6..a60a7ba029db3e 100644 --- a/src/node_http2.h +++ b/src/node_http2.h @@ -712,6 +712,7 @@ class Http2Session : public AsyncWrap, static void Consume(const v8::FunctionCallbackInfo<v8::Value>& args); static void Receive(const v8::FunctionCallbackInfo<v8::Value>& args); static void Destroy(const v8::FunctionCallbackInfo<v8::Value>& args); + static void HasPendingData(const v8::FunctionCallbackInfo<v8::Value>& args); static void Settings(const v8::FunctionCallbackInfo<v8::Value>& args); static void Request(const v8::FunctionCallbackInfo<v8::Value>& args); static void SetNextStreamID(const v8::FunctionCallbackInfo<v8::Value>& args); @@ -723,6 +724,7 @@ class Http2Session : public AsyncWrap, static void Ping(const v8::FunctionCallbackInfo<v8::Value>& args); static void AltSvc(const v8::FunctionCallbackInfo<v8::Value>& args); static void Origin(const v8::FunctionCallbackInfo<v8::Value>& args); + static void SetGracefulClose(const v8::FunctionCallbackInfo<v8::Value>& args); template <get_setting fn, bool local> static void RefreshSettings(const v8::FunctionCallbackInfo<v8::Value>& args); @@ -735,6 +737,7 @@ class Http2Session : public AsyncWrap, BaseObjectPtr<Http2Ping> PopPing(); bool AddPing(const uint8_t* data, v8::Local<v8::Function> 
callback); + bool HasPendingData() const; BaseObjectPtr<Http2Settings> PopSettings(); bool AddSettings(v8::Local<v8::Function> callback); @@ -785,6 +788,13 @@ class Http2Session : public AsyncWrap, Statistics statistics_ = {}; + bool IsGracefulCloseInitiated() const { + return graceful_close_initiated_; + } + void SetGracefulCloseInitiated(bool value) { + graceful_close_initiated_ = value; + } + private: void EmitStatistics(); @@ -951,8 +961,13 @@ class Http2Session : public AsyncWrap, void CopyDataIntoOutgoing(const uint8_t* src, size_t src_length); void ClearOutgoing(int status); + void MaybeNotifyGracefulCloseComplete(); + friend class Http2Scope; friend class Http2StreamListener; + + // Flag to indicate that JavaScript has initiated a graceful closure + bool graceful_close_initiated_ = false; }; struct Http2SessionPerformanceEntryTraits { diff --git a/test/parallel/test-http2-client-rststream-before-connect.js b/test/parallel/test-http2-client-rststream-before-connect.js index bc0cb5ff619dc0..788253d29ae22f 100644 --- a/test/parallel/test-http2-client-rststream-before-connect.js +++ b/test/parallel/test-http2-client-rststream-before-connect.js @@ -5,16 +5,23 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const h2 = require('http2'); +let client; const server = h2.createServer(); server.on('stream', (stream) => { - stream.on('close', common.mustCall()); - stream.respond(); - stream.end('ok'); + stream.on('close', common.mustCall(() => { + client.close(); + server.close(); + })); + stream.on('error', common.expectsError({ + code: 'ERR_HTTP2_STREAM_ERROR', + name: 'Error', + message: 'Stream closed with error code NGHTTP2_PROTOCOL_ERROR' + })); }); server.listen(0, common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`); + client = h2.connect(`http://localhost:${server.address().port}`); const req = client.request(); const closeCode = 1; @@ -52,8 +59,6 @@ server.listen(0, 
common.mustCall(() => { req.on('close', common.mustCall(() => { assert.strictEqual(req.destroyed, true); assert.strictEqual(req.rstCode, closeCode); - server.close(); - client.close(); })); req.on('error', common.expectsError({ diff --git a/test/parallel/test-http2-session-graceful-close.js b/test/parallel/test-http2-session-graceful-close.js new file mode 100644 index 00000000000000..174eb037dce5b4 --- /dev/null +++ b/test/parallel/test-http2-session-graceful-close.js @@ -0,0 +1,48 @@ +'use strict'; + +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const h2 = require('http2'); + +const server = h2.createServer(); +let session; + +server.on('session', common.mustCall(function(s) { + session = s; + session.on('close', common.mustCall(function() { + server.close(); + })); +})); + +server.listen(0, common.mustCall(function() { + const port = server.address().port; + + const url = `http://localhost:${port}`; + const client = h2.connect(url, common.mustCall(function() { + const headers = { + ':path': '/', + ':method': 'GET', + ':scheme': 'http', + ':authority': `localhost:${port}` + }; + const request = client.request(headers); + request.on('response', common.mustCall(function(headers) { + assert.strictEqual(headers[':status'], 200); + }, 1)); + request.on('end', common.mustCall(function() { + client.close(); + })); + request.end(); + request.resume(); + })); + client.on('goaway', common.mustCallAtLeast(1)); +})); + +server.once('request', common.mustCall(function(request, response) { + response.on('finish', common.mustCall(function() { + session.close(); + })); + response.end(); +}));
diff --git a/lib/internal/http2/core.js b/lib/internal/http2/core.js index 554221ac614636..31c3c9e3a4fde5 100644 --- a/lib/internal/http2/core.js +++ b/lib/internal/http2/core.js @@ -1068,6 +1068,7 @@ function setupHandle(socket, type, options) { if (typeof options.selectPadding === 'function') this[kSelectPadding] = options.selectPadding; handle.consume(socket._handle); + handle.ongracefulclosecomplete = this[kMaybeDestroy].bind(this, null); this[kHandle] = handle; if (this[kNativeFields]) { @@ -1589,6 +1590,10 @@ class Http2Session extends EventEmitter { if (typeof callback === 'function') this.once('close', callback); this.goaway(); + const handle = this[kHandle]; + if (handle) { + } this[kMaybeDestroy](); @@ -1609,11 +1614,13 @@ class Http2Session extends EventEmitter { // * session is closed and there are no more pending or open streams [kMaybeDestroy](error) { if (error == null) { + const handle = this[kHandle]; + const hasPendingData = !!handle && handle.hasPendingData(); const state = this[kState]; // Do not destroy if we're not closed and there are pending/open streams if (!this.closed || state.streams.size > 0 || - state.pendingStreams.size > 0) { + state.pendingStreams.size > 0 || hasPendingData) { return; } } @@ -3300,7 +3307,7 @@ function socketOnClose() { state.streams.forEach((stream) => stream.close(NGHTTP2_CANCEL)); state.pendingStreams.forEach((stream) => stream.close(NGHTTP2_CANCEL)); session.close(); - session[kMaybeDestroy](err); + closeSession(session, NGHTTP2_NO_ERROR, err); diff --git a/src/env_properties.h b/src/env_properties.h index 6ccc581034f4b2..3e82a9b543e9db 100644 --- a/src/env_properties.h +++ b/src/env_properties.h @@ -285,6 +285,7 @@ V(onsignal_string, "onsignal") \ V(onunpipe_string, "onunpipe") \ V(onwrite_string, "onwrite") \ + V(ongracefulclosecomplete_string, "ongracefulclosecomplete") \ V(openssl_error_stack, "opensslErrorStack") \ V(options_string, "options") \ V(order_string, "order") \ diff --git a/src/node_http2.cc 
b/src/node_http2.cc index b56b0d76cc7c0b..ea05aaacab8268 100644 --- a/src/node_http2.cc +++ b/src/node_http2.cc @@ -559,7 +559,8 @@ Http2Session::Http2Session(Http2State* http2_state, : AsyncWrap(http2_state->env(), wrap, AsyncWrap::PROVIDER_HTTP2SESSION), js_fields_(http2_state->env()->isolate()), session_type_(type), - http2_state_(http2_state) { + http2_state_(http2_state), + graceful_close_initiated_(false) { MakeWeak(); statistics_.session_type = type; statistics_.start_time = uv_hrtime(); @@ -765,6 +766,24 @@ void Http2Stream::EmitStatistics() { }); +void Http2Session::HasPendingData(const FunctionCallbackInfo<Value>& args) { + args.GetReturnValue().Set(session->HasPendingData()); +bool Http2Session::HasPendingData() const { + // It is expected that want_read will alway be 0 if graceful + // session close is initiated and goaway frame is sent. + if (want_write == 0 && want_read == 0) { + return false; + return true; void Http2Session::EmitStatistics() { if (!HasHttp2Observer(env())) [[likely]] { return; @@ -1743,6 +1762,7 @@ void Http2Session::HandleSettingsFrame(const nghttp2_frame* frame) { void Http2Session::OnStreamAfterWrite(WriteWrap* w, int status) { Debug(this, "write finished with status %d", status); + MaybeNotifyGracefulCloseComplete(); CHECK(is_write_in_progress()); set_write_in_progress(false); @@ -1965,6 +1985,7 @@ uint8_t Http2Session::SendPendingData() { if (!res.async) { set_write_in_progress(false); ClearOutgoing(res.err); + MaybeNotifyGracefulCloseComplete(); MaybeStopReading(); @@ -3478,6 +3499,8 @@ void Initialize(Local<Object> target, SetProtoMethod(isolate, session, "receive", Http2Session::Receive); SetProtoMethod(isolate, session, "destroy", Http2Session::Destroy); SetProtoMethod(isolate, session, "goaway", Http2Session::Goaway); + isolate, session, "hasPendingData", Http2Session::HasPendingData); SetProtoMethod(isolate, session, "settings", Http2Session::Settings); SetProtoMethod(isolate, session, "request", Http2Session::Request); 
SetProtoMethod( @@ -3498,6 +3521,8 @@ void Initialize(Local<Object> target, "remoteSettings", Http2Session::RefreshSettings<nghttp2_session_get_remote_settings, false>); + isolate, session, "setGracefulClose", Http2Session::SetGracefulClose); SetConstructorFunction(context, target, "Http2Session", session); Local<Object> constants = Object::New(isolate); @@ -3552,6 +3577,38 @@ void Initialize(Local<Object> target, nghttp2_set_debug_vprintf_callback(NgHttp2Debug); #endif +void Http2Session::SetGracefulClose(const FunctionCallbackInfo<Value>& args) { + CHECK_NOT_NULL(session); + // Set the graceful close flag + session->SetGracefulCloseInitiated(true); + Debug(session, "Setting graceful close initiated flag"); +void Http2Session::MaybeNotifyGracefulCloseComplete() { + if (!IsGracefulCloseInitiated()) { + bool should_notify = (want_write == 0 && want_read == 0); + if (should_notify) { + Debug(this, "Notifying JS after write in graceful close mode"); + // Make the callback to JavaScript + HandleScope scope(env()->isolate()); + MakeCallback(env()->ongracefulclosecomplete_string(), 0, nullptr); + return; } // namespace http2 } // namespace node diff --git a/src/node_http2.h b/src/node_http2.h index 3ba05cbe7f9ce6..a60a7ba029db3e 100644 --- a/src/node_http2.h +++ b/src/node_http2.h @@ -712,6 +712,7 @@ class Http2Session : public AsyncWrap, static void Consume(const v8::FunctionCallbackInfo<v8::Value>& args); static void Receive(const v8::FunctionCallbackInfo<v8::Value>& args); static void Destroy(const v8::FunctionCallbackInfo<v8::Value>& args); + static void HasPendingData(const v8::FunctionCallbackInfo<v8::Value>& args); static void Settings(const v8::FunctionCallbackInfo<v8::Value>& args); static void Request(const v8::FunctionCallbackInfo<v8::Value>& args); static void SetNextStreamID(const v8::FunctionCallbackInfo<v8::Value>& args); @@ -723,6 +724,7 @@ class Http2Session : public AsyncWrap, static void Ping(const v8::FunctionCallbackInfo<v8::Value>& args); static 
void AltSvc(const v8::FunctionCallbackInfo<v8::Value>& args); static void Origin(const v8::FunctionCallbackInfo<v8::Value>& args); + static void SetGracefulClose(const v8::FunctionCallbackInfo<v8::Value>& args); template <get_setting fn, bool local> static void RefreshSettings(const v8::FunctionCallbackInfo<v8::Value>& args); @@ -735,6 +737,7 @@ class Http2Session : public AsyncWrap, BaseObjectPtr<Http2Ping> PopPing(); bool AddPing(const uint8_t* data, v8::Local<v8::Function> callback); + bool HasPendingData() const; BaseObjectPtr<Http2Settings> PopSettings(); bool AddSettings(v8::Local<v8::Function> callback); @@ -785,6 +788,13 @@ class Http2Session : public AsyncWrap, Statistics statistics_ = {}; + bool IsGracefulCloseInitiated() const { + return graceful_close_initiated_; + void SetGracefulCloseInitiated(bool value) { + graceful_close_initiated_ = value; private: void EmitStatistics(); @@ -951,8 +961,13 @@ class Http2Session : public AsyncWrap, void CopyDataIntoOutgoing(const uint8_t* src, size_t src_length); void ClearOutgoing(int status); + void MaybeNotifyGracefulCloseComplete(); friend class Http2Scope; friend class Http2StreamListener; + // Flag to indicate that JavaScript has initiated a graceful closure + bool graceful_close_initiated_ = false; }; struct Http2SessionPerformanceEntryTraits { diff --git a/test/parallel/test-http2-client-rststream-before-connect.js b/test/parallel/test-http2-client-rststream-before-connect.js index bc0cb5ff619dc0..788253d29ae22f 100644 --- a/test/parallel/test-http2-client-rststream-before-connect.js +++ b/test/parallel/test-http2-client-rststream-before-connect.js @@ -5,16 +5,23 @@ if (!common.hasCrypto) common.skip('missing crypto'); const assert = require('assert'); const h2 = require('http2'); +let client; const server = h2.createServer(); server.on('stream', (stream) => { - stream.on('close', common.mustCall()); - stream.respond(); - stream.end('ok'); + stream.on('close', common.mustCall(() => { + client.close(); + 
stream.on('error', common.expectsError({ + name: 'Error', + message: 'Stream closed with error code NGHTTP2_PROTOCOL_ERROR' }); server.listen(0, common.mustCall(() => { - const client = h2.connect(`http://localhost:${server.address().port}`); + client = h2.connect(`http://localhost:${server.address().port}`); const req = client.request(); const closeCode = 1; @@ -52,8 +59,6 @@ server.listen(0, common.mustCall(() => { req.on('close', common.mustCall(() => { assert.strictEqual(req.destroyed, true); assert.strictEqual(req.rstCode, closeCode); - server.close(); - client.close(); })); req.on('error', common.expectsError({ diff --git a/test/parallel/test-http2-session-graceful-close.js b/test/parallel/test-http2-session-graceful-close.js new file mode 100644 index 00000000000000..174eb037dce5b4 --- /dev/null +++ b/test/parallel/test-http2-session-graceful-close.js @@ -0,0 +1,48 @@ +'use strict'; +const common = require('../common'); +if (!common.hasCrypto) + common.skip('missing crypto'); +const assert = require('assert'); +const h2 = require('http2'); +const server = h2.createServer(); +let session; +server.on('session', common.mustCall(function(s) { + session = s; + session.on('close', common.mustCall(function() { +server.listen(0, common.mustCall(function() { + const port = server.address().port; + const url = `http://localhost:${port}`; + const client = h2.connect(url, common.mustCall(function() { + const headers = { + ':path': '/', + ':method': 'GET', + ':scheme': 'http', + ':authority': `localhost:${port}` + }; + const request = client.request(headers); + request.on('response', common.mustCall(function(headers) { + }, 1)); + client.close(); + })); + request.end(); + request.resume(); + client.on('goaway', common.mustCallAtLeast(1)); +server.once('request', common.mustCall(function(request, response) { + session.close(); + response.end();
[ "+ handle.setGracefulClose();", "+ return;", "+ code: 'ERR_HTTP2_STREAM_ERROR',", "+ assert.strictEqual(headers[':status'], 200);", "+ request.on('end', common.mustCall(function() {", "+ response.on('finish', common.mustCall(function() {" ]
[ 18, 151, 247, 305, 307, 317 ]
{ "additions": 142, "author": "pandeykushagra51", "deletions": 9, "html_url": "https://github.com/nodejs/node/pull/57808", "issue_id": 57808, "merged_at": "2025-04-19T16:36:03Z", "omission_probability": 0.1, "pr_number": 57808, "repo": "nodejs/node", "title": "http2: fix graceful session close", "total_changes": 151 }
858
diff --git a/test/parallel/test-child-process-bad-stdio.js b/test/parallel/test-child-process-bad-stdio.js index 90e8ddd0215a2b..b612fc832281a6 100644 --- a/test/parallel/test-child-process-bad-stdio.js +++ b/test/parallel/test-child-process-bad-stdio.js @@ -1,21 +1,23 @@ 'use strict'; // Flags: --expose-internals const common = require('../common'); -const assert = require('assert'); -const cp = require('child_process'); if (process.argv[2] === 'child') { setTimeout(() => {}, common.platformTimeout(100)); return; } +const assert = require('node:assert'); +const cp = require('node:child_process'); +const { mock, test } = require('node:test'); +const { ChildProcess } = require('internal/child_process'); + // Monkey patch spawn() to create a child process normally, but destroy the // stdout and stderr streams. This replicates the conditions where the streams // cannot be properly created. -const ChildProcess = require('internal/child_process').ChildProcess; const original = ChildProcess.prototype.spawn; -ChildProcess.prototype.spawn = function() { +mock.method(ChildProcess.prototype, 'spawn', function() { const err = original.apply(this, arguments); this.stdout.destroy(); @@ -24,7 +26,7 @@ ChildProcess.prototype.spawn = function() { this.stderr = null; return err; -}; +}); function createChild(options, callback) { const [cmd, opts] = common.escapePOSIXShell`"${process.execPath}" "${__filename}" child`; @@ -33,32 +35,32 @@ function createChild(options, callback) { return cp.exec(cmd, options, common.mustCall(callback)); } -// Verify that normal execution of a child process is handled. -{ +test('normal execution of a child process is handled', (_, done) => { createChild({}, (err, stdout, stderr) => { assert.strictEqual(err, null); assert.strictEqual(stdout, ''); assert.strictEqual(stderr, ''); + done(); }); -} +}); -// Verify that execution with an error event is handled. 
-{ +test('execution with an error event is handled', (_, done) => { const error = new Error('foo'); const child = createChild({}, (err, stdout, stderr) => { assert.strictEqual(err, error); assert.strictEqual(stdout, ''); assert.strictEqual(stderr, ''); + done(); }); child.emit('error', error); -} +}); -// Verify that execution with a killed process is handled. -{ +test('execution with a killed process is handled', (_, done) => { createChild({ timeout: 1 }, (err, stdout, stderr) => { assert.strictEqual(err.killed, true); assert.strictEqual(stdout, ''); assert.strictEqual(stderr, ''); + done(); }); -} +});
diff --git a/test/parallel/test-child-process-bad-stdio.js b/test/parallel/test-child-process-bad-stdio.js index 90e8ddd0215a2b..b612fc832281a6 100644 --- a/test/parallel/test-child-process-bad-stdio.js +++ b/test/parallel/test-child-process-bad-stdio.js @@ -1,21 +1,23 @@ 'use strict'; // Flags: --expose-internals const common = require('../common'); -const assert = require('assert'); -const cp = require('child_process'); if (process.argv[2] === 'child') { setTimeout(() => {}, common.platformTimeout(100)); return; +const assert = require('node:assert'); +const cp = require('node:child_process'); +const { mock, test } = require('node:test'); +const { ChildProcess } = require('internal/child_process'); + // Monkey patch spawn() to create a child process normally, but destroy the // stdout and stderr streams. This replicates the conditions where the streams // cannot be properly created. const original = ChildProcess.prototype.spawn; -ChildProcess.prototype.spawn = function() { +mock.method(ChildProcess.prototype, 'spawn', function() { const err = original.apply(this, arguments); this.stdout.destroy(); @@ -24,7 +26,7 @@ ChildProcess.prototype.spawn = function() { this.stderr = null; return err; -}; function createChild(options, callback) { const [cmd, opts] = common.escapePOSIXShell`"${process.execPath}" "${__filename}" child`; @@ -33,32 +35,32 @@ function createChild(options, callback) { return cp.exec(cmd, options, common.mustCall(callback)); -// Verify that normal execution of a child process is handled. +test('normal execution of a child process is handled', (_, done) => { createChild({}, (err, stdout, stderr) => { assert.strictEqual(err, null); -// Verify that execution with an error event is handled. 
+test('execution with an error event is handled', (_, done) => { const error = new Error('foo'); const child = createChild({}, (err, stdout, stderr) => { assert.strictEqual(err, error); child.emit('error', error); -// Verify that execution with a killed process is handled. +test('execution with a killed process is handled', (_, done) => { createChild({ timeout: 1 }, (err, stdout, stderr) => { assert.strictEqual(err.killed, true);
[ "-const ChildProcess = require('internal/child_process').ChildProcess;" ]
[ 24 ]
{ "additions": 16, "author": "cjihrig", "deletions": 14, "html_url": "https://github.com/nodejs/node/pull/56562", "issue_id": 56562, "merged_at": "2025-01-13T20:51:55Z", "omission_probability": 0.1, "pr_number": 56562, "repo": "nodejs/node", "title": "test: update test-child-process-bad-stdio to use node:test", "total_changes": 30 }
859
diff --git a/tools/v8_gypfiles/toolchain.gypi b/tools/v8_gypfiles/toolchain.gypi index a5388783d9b88f..937f3f51976163 100644 --- a/tools/v8_gypfiles/toolchain.gypi +++ b/tools/v8_gypfiles/toolchain.gypi @@ -97,33 +97,6 @@ # Indicates if gcmole tools are downloaded by a hook. 'gcmole%': 0, }, - - # [GYP] this needs to be outside of the top level 'variables' - 'conditions': [ - ['host_arch=="ia32" or host_arch=="x64" or \ - host_arch=="ppc64" or \ - host_arch=="s390x" or \ - clang==1', { - 'variables': { - 'host_cxx_is_biarch%': 1, - }, - }, { - 'variables': { - 'host_cxx_is_biarch%': 0, - }, - }], - ['target_arch=="ia32" or target_arch=="x64" or \ - target_arch=="ppc64" or \ - target_arch=="s390x" or clang==1', { - 'variables': { - 'target_cxx_is_biarch%': 1, - }, - }, { - 'variables': { - 'target_cxx_is_biarch%': 0, - }, - }], - ], 'target_defaults': { 'include_dirs': [ '<(V8_ROOT)', @@ -574,70 +547,6 @@ '-mmmx', # Allows mmintrin.h for MMX intrinsics. ], }], - ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ - or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \ - (v8_target_arch=="arm" or v8_target_arch=="ia32")', { - 'target_conditions': [ - ['_toolset=="host"', { - 'conditions': [ - ['host_cxx_is_biarch==1', { - 'conditions': [ - ['host_arch=="s390x"', { - 'cflags': [ '-m31' ], - 'ldflags': [ '-m31' ] - },{ - 'cflags': [ '-m32' ], - 'ldflags': [ '-m32' ] - }], - ], - }], - ], - 'xcode_settings': { - 'ARCHS': [ 'i386' ], - }, - }], - ['_toolset=="target"', { - 'conditions': [ - ['target_cxx_is_biarch==1', { - 'conditions': [ - ['host_arch=="s390x"', { - 'cflags': [ '-m31' ], - 'ldflags': [ '-m31' ] - },{ - 'cflags': [ '-m32' ], - 'ldflags': [ '-m32' ], - }], - ], - }], - ], - 'xcode_settings': { - 'ARCHS': [ 'i386' ], - }, - }], - ], - }], - ['(OS=="linux" or OS=="android") and \ - (v8_target_arch=="x64" or v8_target_arch=="arm64" or \ - v8_target_arch=="ppc64" or v8_target_arch=="s390x")', { - 'target_conditions': [ - 
['_toolset=="host"', { - 'conditions': [ - ['host_cxx_is_biarch==1', { - 'cflags': [ '-m64' ], - 'ldflags': [ '-m64' ] - }], - ], - }], - ['_toolset=="target"', { - 'conditions': [ - ['target_cxx_is_biarch==1', { - 'cflags': [ '-m64' ], - 'ldflags': [ '-m64' ], - }], - ] - }], - ], - }], ['OS=="android" and v8_android_log_stdout==1', { 'defines': [ 'V8_ANDROID_LOG_STDOUT',
diff --git a/tools/v8_gypfiles/toolchain.gypi b/tools/v8_gypfiles/toolchain.gypi index a5388783d9b88f..937f3f51976163 100644 --- a/tools/v8_gypfiles/toolchain.gypi +++ b/tools/v8_gypfiles/toolchain.gypi @@ -97,33 +97,6 @@ # Indicates if gcmole tools are downloaded by a hook. 'gcmole%': 0, }, - - # [GYP] this needs to be outside of the top level 'variables' - 'conditions': [ - ['host_arch=="ia32" or host_arch=="x64" or \ - host_arch=="ppc64" or \ - host_arch=="s390x" or \ - clang==1', { - 'host_cxx_is_biarch%': 1, - target_arch=="ppc64" or \ - target_arch=="s390x" or clang==1', { - 'target_cxx_is_biarch%': 1, - 'target_cxx_is_biarch%': 0, - ], 'target_defaults': { 'include_dirs': [ '<(V8_ROOT)', @@ -574,70 +547,6 @@ '-mmmx', # Allows mmintrin.h for MMX intrinsics. ], }], - or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \ - (v8_target_arch=="arm" or v8_target_arch=="ia32")', { - 'ldflags': [ '-m32' ] - ['target_cxx_is_biarch==1', { - 'ldflags': [ '-m32' ], - ], - ['(OS=="linux" or OS=="android") and \ - (v8_target_arch=="x64" or v8_target_arch=="arm64" or \ - v8_target_arch=="ppc64" or v8_target_arch=="s390x")', { - 'cflags': [ '-m64' ], - 'ldflags': [ '-m64' ] - ], - 'conditions': [ - ['target_cxx_is_biarch==1', { - }], - ] - ], ['OS=="android" and v8_android_log_stdout==1', { 'defines': [ 'V8_ANDROID_LOG_STDOUT',
[ "- 'host_cxx_is_biarch%': 0,", "- ['target_arch==\"ia32\" or target_arch==\"x64\" or \\", "- ['(OS==\"linux\" or OS==\"freebsd\" or OS==\"openbsd\" or OS==\"solaris\" \\", "- 'cflags': [ '-m64' ],", "- 'ldflags': [ '-m64' ]," ]
[ 20, 23, 42, 99, 100 ]
{ "additions": 0, "author": "targos", "deletions": 91, "html_url": "https://github.com/nodejs/node/pull/57907", "issue_id": 57907, "merged_at": "2025-04-19T10:22:10Z", "omission_probability": 0.1, "pr_number": 57907, "repo": "nodejs/node", "title": "build: remove redundant `-mXX` flags for V8", "total_changes": 91 }
860
diff --git a/deps/zlib/BUILD.gn b/deps/zlib/BUILD.gn index 378bd0df75ca22..2a898d2a60cfa2 100644 --- a/deps/zlib/BUILD.gn +++ b/deps/zlib/BUILD.gn @@ -70,7 +70,7 @@ source_set("zlib_common_headers") { use_arm_neon_optimizations = false if ((current_cpu == "arm" || current_cpu == "arm64") && !(is_win && !is_clang)) { - # TODO([email protected]): Optimizations temporarily disabled for + # TODO([email protected]): Optimizations temporarily disabled for # Windows on Arm MSVC builds, see http://crbug.com/v8/10012. if (arm_use_neon) { use_arm_neon_optimizations = true diff --git a/deps/zlib/deflate.c b/deps/zlib/deflate.c index 8a5281c2b6cd8d..49496bb3b05618 100644 --- a/deps/zlib/deflate.c +++ b/deps/zlib/deflate.c @@ -485,14 +485,7 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, s->window = (Bytef *) ZALLOC(strm, s->w_size + WINDOW_PADDING, 2*sizeof(Byte)); - /* Avoid use of unitialized values in the window, see crbug.com/1137613 and - * crbug.com/1144420 */ - zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte))); s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); - /* Avoid use of uninitialized value, see: - * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 - */ - zmemzero(s->prev, s->w_size * sizeof(Pos)); s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); s->high_water = 0; /* nothing written to s->window yet */ @@ -551,6 +544,13 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, deflateEnd (strm); return Z_MEM_ERROR; } + /* Avoid use of unitialized values in the window, see crbug.com/1137613 and + * crbug.com/1144420 */ + zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte))); + /* Avoid use of uninitialized value, see: + * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 + */ + zmemzero(s->prev, s->w_size * sizeof(Pos)); #ifdef LIT_MEM s->d_buf = (ushf *)(s->pending_buf + (s->lit_bufsize << 1)); s->l_buf = s->pending_buf + (s->lit_bufsize << 2); diff --git 
a/deps/zlib/patches/0017-deflate-move-zmemzero-after-null-check.patch b/deps/zlib/patches/0017-deflate-move-zmemzero-after-null-check.patch new file mode 100644 index 00000000000000..ac8ade53621ae0 --- /dev/null +++ b/deps/zlib/patches/0017-deflate-move-zmemzero-after-null-check.patch @@ -0,0 +1,49 @@ +From 93f86001b67609106c658fe0908a9b7931245b8a Mon Sep 17 00:00:00 2001 +From: pedro martelletto <[email protected]> +Date: Thu, 3 Apr 2025 16:46:42 +0000 +Subject: [PATCH] [zlib] Deflate: move zmemzero after NULL check + +ZALLOC() might fail, in which case dereferencing the returned pointer +results in undefined behaviour. N.B. These conditions are not reachable +from Chromium, as Chromium will abort rather than return nullptr from +malloc. Found by libfido2's fuzz harness. +--- + third_party/zlib/deflate.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/third_party/zlib/deflate.c b/third_party/zlib/deflate.c +index 8a5281c2b6cd8..49496bb3b0561 100644 +--- a/third_party/zlib/deflate.c ++++ b/third_party/zlib/deflate.c +@@ -485,14 +485,7 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, + s->window = (Bytef *) ZALLOC(strm, + s->w_size + WINDOW_PADDING, + 2*sizeof(Byte)); +- /* Avoid use of unitialized values in the window, see crbug.com/1137613 and +- * crbug.com/1144420 */ +- zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte))); + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); +- /* Avoid use of uninitialized value, see: +- * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 +- */ +- zmemzero(s->prev, s->w_size * sizeof(Pos)); + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + + s->high_water = 0; /* nothing written to s->window yet */ +@@ -551,6 +544,13 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, + deflateEnd (strm); + return Z_MEM_ERROR; + } ++ /* Avoid use of unitialized values in the window, see crbug.com/1137613 and ++ * 
crbug.com/1144420 */ ++ zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte))); ++ /* Avoid use of uninitialized value, see: ++ * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 ++ */ ++ zmemzero(s->prev, s->w_size * sizeof(Pos)); + #ifdef LIT_MEM + s->d_buf = (ushf *)(s->pending_buf + (s->lit_bufsize << 1)); + s->l_buf = s->pending_buf + (s->lit_bufsize << 2); +-- +2.49.0.504.g3bcea36a83-goog + diff --git a/src/zlib_version.h b/src/zlib_version.h index adbfb15d6c66f9..7d0fae9137f694 100644 --- a/src/zlib_version.h +++ b/src/zlib_version.h @@ -2,5 +2,5 @@ // Refer to tools/dep_updaters/update-zlib.sh #ifndef SRC_ZLIB_VERSION_H_ #define SRC_ZLIB_VERSION_H_ -#define ZLIB_VERSION "1.3.0.1-motley-788cb3c" +#define ZLIB_VERSION "1.3.0.1-motley-780819f" #endif // SRC_ZLIB_VERSION_H_
diff --git a/deps/zlib/BUILD.gn b/deps/zlib/BUILD.gn index 378bd0df75ca22..2a898d2a60cfa2 100644 --- a/deps/zlib/BUILD.gn +++ b/deps/zlib/BUILD.gn @@ -70,7 +70,7 @@ source_set("zlib_common_headers") { use_arm_neon_optimizations = false if ((current_cpu == "arm" || current_cpu == "arm64") && !(is_win && !is_clang)) { - # TODO([email protected]): Optimizations temporarily disabled for + # TODO([email protected]): Optimizations temporarily disabled for # Windows on Arm MSVC builds, see http://crbug.com/v8/10012. if (arm_use_neon) { use_arm_neon_optimizations = true diff --git a/deps/zlib/deflate.c b/deps/zlib/deflate.c index 8a5281c2b6cd8d..49496bb3b05618 100644 --- a/deps/zlib/deflate.c +++ b/deps/zlib/deflate.c @@ -485,14 +485,7 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, s->window = (Bytef *) ZALLOC(strm, s->w_size + WINDOW_PADDING, 2*sizeof(Byte)); - * crbug.com/1144420 */ s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); - /* Avoid use of uninitialized value, see: - * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 - */ - zmemzero(s->prev, s->w_size * sizeof(Pos)); s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); s->high_water = 0; /* nothing written to s->window yet */ @@ -551,6 +544,13 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, deflateEnd (strm); return Z_MEM_ERROR; } + /* Avoid use of unitialized values in the window, see crbug.com/1137613 and + * crbug.com/1144420 */ + zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte))); + /* Avoid use of uninitialized value, see: + * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 + */ + zmemzero(s->prev, s->w_size * sizeof(Pos)); #ifdef LIT_MEM s->d_buf = (ushf *)(s->pending_buf + (s->lit_bufsize << 1)); s->l_buf = s->pending_buf + (s->lit_bufsize << 2); diff --git a/deps/zlib/patches/0017-deflate-move-zmemzero-after-null-check.patch b/deps/zlib/patches/0017-deflate-move-zmemzero-after-null-check.patch new file mode 
100644 index 00000000000000..ac8ade53621ae0 --- /dev/null +++ b/deps/zlib/patches/0017-deflate-move-zmemzero-after-null-check.patch @@ -0,0 +1,49 @@ +From 93f86001b67609106c658fe0908a9b7931245b8a Mon Sep 17 00:00:00 2001 +From: pedro martelletto <[email protected]> +Date: Thu, 3 Apr 2025 16:46:42 +0000 +Subject: [PATCH] [zlib] Deflate: move zmemzero after NULL check +ZALLOC() might fail, in which case dereferencing the returned pointer +results in undefined behaviour. N.B. These conditions are not reachable +from Chromium, as Chromium will abort rather than return nullptr from +malloc. Found by libfido2's fuzz harness. +--- + 1 file changed, 7 insertions(+), 7 deletions(-) +diff --git a/third_party/zlib/deflate.c b/third_party/zlib/deflate.c +index 8a5281c2b6cd8..49496bb3b0561 100644 +--- a/third_party/zlib/deflate.c ++++ b/third_party/zlib/deflate.c +@@ -485,14 +485,7 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, + s->window = (Bytef *) ZALLOC(strm, + s->w_size + WINDOW_PADDING, + 2*sizeof(Byte)); +- /* Avoid use of unitialized values in the window, see crbug.com/1137613 and +- * crbug.com/1144420 */ +- zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte))); + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); +- /* Avoid use of uninitialized value, see: +- * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 +- */ +- zmemzero(s->prev, s->w_size * sizeof(Pos)); + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + + s->high_water = 0; /* nothing written to s->window yet */ +@@ -551,6 +544,13 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, + deflateEnd (strm); + } ++ /* Avoid use of unitialized values in the window, see crbug.com/1137613 and ++ /* Avoid use of uninitialized value, see: ++ * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 ++ */ + #ifdef LIT_MEM + s->d_buf = (ushf *)(s->pending_buf + (s->lit_bufsize << 1)); + s->l_buf = s->pending_buf + (s->lit_bufsize << 
2); +-- +2.49.0.504.g3bcea36a83-goog diff --git a/src/zlib_version.h b/src/zlib_version.h index adbfb15d6c66f9..7d0fae9137f694 100644 --- a/src/zlib_version.h +++ b/src/zlib_version.h @@ -2,5 +2,5 @@ // Refer to tools/dep_updaters/update-zlib.sh #ifndef SRC_ZLIB_VERSION_H_ #define SRC_ZLIB_VERSION_H_ -#define ZLIB_VERSION "1.3.0.1-motley-788cb3c" +#define ZLIB_VERSION "1.3.0.1-motley-780819f" #endif // SRC_ZLIB_VERSION_H_
[ "- /* Avoid use of unitialized values in the window, see crbug.com/1137613 and", "- zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte)));", "+ third_party/zlib/deflate.c | 14 +++++++-------", "+ return Z_MEM_ERROR;", "++ * crbug.com/1144420 */", "++ zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte)));", "++ zmemzero(s->prev, s->w_size * sizeof(Pos));" ]
[ 21, 23, 62, 86, 89, 90, 94 ]
{ "additions": 57, "author": "nodejs-github-bot", "deletions": 9, "html_url": "https://github.com/nodejs/node/pull/57768", "issue_id": 57768, "merged_at": "2025-04-18T13:22:02Z", "omission_probability": 0.1, "pr_number": 57768, "repo": "nodejs/node", "title": "deps: update zlib to 1.3.0.1-motley-780819f", "total_changes": 66 }
861
diff --git a/doc/api/process.md b/doc/api/process.md index 2ba2794cffec94..8fb0e38834b32c 100644 --- a/doc/api/process.md +++ b/doc/api/process.md @@ -3379,7 +3379,7 @@ any exit or close events and without running any cleanup handler. This function will never return, unless an error occurred. -This function is not available on Windows. +This function is not available on Windows or IBM i. ## `process.report` diff --git a/lib/internal/process/per_thread.js b/lib/internal/process/per_thread.js index 335b8868304bf4..36ac1046dab471 100644 --- a/lib/internal/process/per_thread.js +++ b/lib/internal/process/per_thread.js @@ -286,7 +286,7 @@ function wrapProcessMethods(binding) { if (!isMainThread) { throw new ERR_WORKER_UNSUPPORTED_OPERATION('Calling process.execve'); - } else if (process.platform === 'win32') { + } else if (process.platform === 'win32' || process.platform === 'os400') { throw new ERR_FEATURE_UNAVAILABLE_ON_PLATFORM('process.execve'); } diff --git a/src/node_process_methods.cc b/src/node_process_methods.cc index 3866c612600642..ac51780f1535a3 100644 --- a/src/node_process_methods.cc +++ b/src/node_process_methods.cc @@ -497,7 +497,7 @@ static void ReallyExit(const FunctionCallbackInfo<Value>& args) { env->Exit(code); } -#ifdef __POSIX__ +#if defined __POSIX__ && !defined(__PASE__) inline int persist_standard_stream(int fd) { int flags = fcntl(fd, F_GETFD, 0); @@ -779,7 +779,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, SetMethod(isolate, target, "dlopen", binding::DLOpen); SetMethod(isolate, target, "reallyExit", ReallyExit); -#ifdef __POSIX__ +#if defined __POSIX__ && !defined(__PASE__) SetMethod(isolate, target, "execve", Execve); #endif SetMethodNoSideEffect(isolate, target, "uptime", Uptime); @@ -826,7 +826,7 @@ void RegisterExternalReferences(ExternalReferenceRegistry* registry) { registry->Register(binding::DLOpen); registry->Register(ReallyExit); -#ifdef __POSIX__ +#if defined __POSIX__ && !defined(__PASE__) 
registry->Register(Execve); #endif registry->Register(Uptime); diff --git a/test/parallel/test-process-execve-abort.js b/test/parallel/test-process-execve-abort.js index 515e1c1f8f5240..4a36944ac83ab0 100644 --- a/test/parallel/test-process-execve-abort.js +++ b/test/parallel/test-process-execve-abort.js @@ -1,14 +1,14 @@ 'use strict'; -const { skip, isWindows } = require('../common'); +const { skip, isWindows, isIBMi } = require('../common'); const { ok } = require('assert'); const { spawnSync } = require('child_process'); const { isMainThread } = require('worker_threads'); if (!isMainThread) { skip('process.execve is not available in Workers'); -} else if (isWindows) { - skip('process.execve is not available in Windows'); +} else if (isWindows || isIBMi) { + skip('process.execve is not available in Windows or IBM i'); } if (process.argv[2] === 'child') { diff --git a/test/parallel/test-process-execve-on-exit.js b/test/parallel/test-process-execve-on-exit.js index e6859b51fe27ce..ff01b0b50e2581 100644 --- a/test/parallel/test-process-execve-on-exit.js +++ b/test/parallel/test-process-execve-on-exit.js @@ -1,13 +1,13 @@ 'use strict'; -const { mustNotCall, skip, isWindows } = require('../common'); +const { mustNotCall, skip, isWindows, isIBMi } = require('../common'); const { strictEqual } = require('assert'); const { isMainThread } = require('worker_threads'); if (!isMainThread) { skip('process.execve is not available in Workers'); -} else if (isWindows) { - skip('process.execve is not available in Windows'); +} else if (isWindows || isIBMi) { + skip('process.execve is not available in Windows or IBM i'); } if (process.argv[2] === 'replaced') { diff --git a/test/parallel/test-process-execve-permission-fail.js b/test/parallel/test-process-execve-permission-fail.js index 0398552edd577e..f1fceca2700245 100644 --- a/test/parallel/test-process-execve-permission-fail.js +++ b/test/parallel/test-process-execve-permission-fail.js @@ -2,14 +2,14 @@ 'use strict'; -const { 
mustCall, skip, isWindows } = require('../common'); +const { mustCall, skip, isWindows, isIBMi } = require('../common'); const { fail, throws } = require('assert'); const { isMainThread } = require('worker_threads'); if (!isMainThread) { skip('process.execve is not available in Workers'); -} else if (isWindows) { - skip('process.execve is not available in Windows'); +} else if (isWindows || isIBMi) { + skip('process.execve is not available in Windows or IBM i'); } if (process.argv[2] === 'replaced') { diff --git a/test/parallel/test-process-execve-permission-granted.js b/test/parallel/test-process-execve-permission-granted.js index 3521b240f00ab7..f4d36d83f07a29 100644 --- a/test/parallel/test-process-execve-permission-granted.js +++ b/test/parallel/test-process-execve-permission-granted.js @@ -2,14 +2,14 @@ 'use strict'; -const { skip, isWindows } = require('../common'); +const { skip, isWindows, isIBMi } = require('../common'); const { deepStrictEqual } = require('assert'); const { isMainThread } = require('worker_threads'); if (!isMainThread) { skip('process.execve is not available in Workers'); -} else if (isWindows) { - skip('process.execve is not available in Windows'); +} else if (isWindows || isIBMi) { + skip('process.execve is not available in Windows or IBM i'); } if (process.argv[2] === 'replaced') { diff --git a/test/parallel/test-process-execve-socket.js b/test/parallel/test-process-execve-socket.js index 9d85f7ce2bf938..d113f690a09ab9 100644 --- a/test/parallel/test-process-execve-socket.js +++ b/test/parallel/test-process-execve-socket.js @@ -1,14 +1,14 @@ 'use strict'; -const { mustCall, mustNotCall, skip, isWindows } = require('../common'); +const { mustCall, mustNotCall, skip, isWindows, isIBMi } = require('../common'); const { fail, ok } = require('assert'); const { createServer } = require('net'); const { isMainThread } = require('worker_threads'); if (!isMainThread) { skip('process.execve is not available in Workers'); -} else if (isWindows) { 
- skip('process.execve is not available in Windows'); +} else if (isWindows || isIBMi) { + skip('process.execve is not available in Windows or IBM i'); } if (process.argv[2] === 'replaced') { diff --git a/test/parallel/test-process-execve-validation.js b/test/parallel/test-process-execve-validation.js index 339d53d5737d64..febaa12d06c30f 100644 --- a/test/parallel/test-process-execve-validation.js +++ b/test/parallel/test-process-execve-validation.js @@ -1,6 +1,6 @@ 'use strict'; -const { skip, isWindows } = require('../common'); +const { skip, isWindows, isIBMi } = require('../common'); const { throws } = require('assert'); const { isMainThread } = require('worker_threads'); @@ -8,7 +8,7 @@ if (!isMainThread) { skip('process.execve is not available in Workers'); } -if (!isWindows) { +if (!isWindows && !isIBMi) { // Invalid path name { throws(() => { diff --git a/test/parallel/test-process-execve-worker-threads.js b/test/parallel/test-process-execve-worker-threads.js index 5b93f45bbeb930..551f6f7ac30691 100644 --- a/test/parallel/test-process-execve-worker-threads.js +++ b/test/parallel/test-process-execve-worker-threads.js @@ -1,11 +1,11 @@ 'use strict'; -const { isWindows, mustCall, skip } = require('../common'); +const { isWindows, isIBMi, mustCall, skip } = require('../common'); const { throws } = require('assert'); const { isMainThread, Worker } = require('worker_threads'); -if (isWindows) { - skip('process.execve is not available in Windows'); +if (isWindows || isIBMi) { + skip('process.execve is not available in Windows or IBM i'); } if (isMainThread) { diff --git a/test/parallel/test-process-execve.js b/test/parallel/test-process-execve.js index 1cc9bf87018497..b0d4bc05158f62 100644 --- a/test/parallel/test-process-execve.js +++ b/test/parallel/test-process-execve.js @@ -1,13 +1,13 @@ 'use strict'; -const { isWindows, skip } = require('../common'); +const { isWindows, isIBMi, skip } = require('../common'); const { deepStrictEqual, fail, strictEqual } = 
require('assert'); const { isMainThread } = require('worker_threads'); if (!isMainThread) { skip('process.execve is not available in Workers'); -} else if (isWindows) { - skip('process.execve is not available in Windows'); +} else if (isWindows || isIBMi) { + skip('process.execve is not available in Windows or IBM i'); } if (process.argv[2] === 'replaced') {
diff --git a/doc/api/process.md b/doc/api/process.md index 2ba2794cffec94..8fb0e38834b32c 100644 --- a/doc/api/process.md +++ b/doc/api/process.md @@ -3379,7 +3379,7 @@ any exit or close events and without running any cleanup handler. This function will never return, unless an error occurred. -This function is not available on Windows. +This function is not available on Windows or IBM i. ## `process.report` diff --git a/lib/internal/process/per_thread.js b/lib/internal/process/per_thread.js index 335b8868304bf4..36ac1046dab471 100644 --- a/lib/internal/process/per_thread.js +++ b/lib/internal/process/per_thread.js @@ -286,7 +286,7 @@ function wrapProcessMethods(binding) { if (!isMainThread) { throw new ERR_WORKER_UNSUPPORTED_OPERATION('Calling process.execve'); - } else if (process.platform === 'win32') { + } else if (process.platform === 'win32' || process.platform === 'os400') { throw new ERR_FEATURE_UNAVAILABLE_ON_PLATFORM('process.execve'); } diff --git a/src/node_process_methods.cc b/src/node_process_methods.cc index 3866c612600642..ac51780f1535a3 100644 --- a/src/node_process_methods.cc +++ b/src/node_process_methods.cc @@ -497,7 +497,7 @@ static void ReallyExit(const FunctionCallbackInfo<Value>& args) { env->Exit(code); inline int persist_standard_stream(int fd) { int flags = fcntl(fd, F_GETFD, 0); @@ -779,7 +779,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, SetMethod(isolate, target, "dlopen", binding::DLOpen); SetMethod(isolate, target, "reallyExit", ReallyExit); SetMethod(isolate, target, "execve", Execve); SetMethodNoSideEffect(isolate, target, "uptime", Uptime); @@ -826,7 +826,7 @@ void RegisterExternalReferences(ExternalReferenceRegistry* registry) { registry->Register(binding::DLOpen); registry->Register(ReallyExit); registry->Register(Execve); registry->Register(Uptime); diff --git a/test/parallel/test-process-execve-abort.js b/test/parallel/test-process-execve-abort.js index 515e1c1f8f5240..4a36944ac83ab0 100644 --- 
a/test/parallel/test-process-execve-abort.js +++ b/test/parallel/test-process-execve-abort.js const { ok } = require('assert'); const { spawnSync } = require('child_process'); if (process.argv[2] === 'child') { diff --git a/test/parallel/test-process-execve-on-exit.js b/test/parallel/test-process-execve-on-exit.js index e6859b51fe27ce..ff01b0b50e2581 100644 --- a/test/parallel/test-process-execve-on-exit.js +++ b/test/parallel/test-process-execve-on-exit.js +const { mustNotCall, skip, isWindows, isIBMi } = require('../common'); const { strictEqual } = require('assert'); diff --git a/test/parallel/test-process-execve-permission-fail.js b/test/parallel/test-process-execve-permission-fail.js index 0398552edd577e..f1fceca2700245 100644 --- a/test/parallel/test-process-execve-permission-fail.js +++ b/test/parallel/test-process-execve-permission-fail.js -const { mustCall, skip, isWindows } = require('../common'); +const { mustCall, skip, isWindows, isIBMi } = require('../common'); const { fail, throws } = require('assert'); diff --git a/test/parallel/test-process-execve-permission-granted.js b/test/parallel/test-process-execve-permission-granted.js index 3521b240f00ab7..f4d36d83f07a29 100644 --- a/test/parallel/test-process-execve-permission-granted.js +++ b/test/parallel/test-process-execve-permission-granted.js const { deepStrictEqual } = require('assert'); diff --git a/test/parallel/test-process-execve-socket.js b/test/parallel/test-process-execve-socket.js index 9d85f7ce2bf938..d113f690a09ab9 100644 --- a/test/parallel/test-process-execve-socket.js +++ b/test/parallel/test-process-execve-socket.js -const { mustCall, mustNotCall, skip, isWindows } = require('../common'); +const { mustCall, mustNotCall, skip, isWindows, isIBMi } = require('../common'); const { fail, ok } = require('assert'); const { createServer } = require('net'); diff --git a/test/parallel/test-process-execve-validation.js b/test/parallel/test-process-execve-validation.js index 
339d53d5737d64..febaa12d06c30f 100644 --- a/test/parallel/test-process-execve-validation.js +++ b/test/parallel/test-process-execve-validation.js @@ -1,6 +1,6 @@ @@ -8,7 +8,7 @@ if (!isMainThread) { -if (!isWindows) { +if (!isWindows && !isIBMi) { // Invalid path name { throws(() => { diff --git a/test/parallel/test-process-execve-worker-threads.js b/test/parallel/test-process-execve-worker-threads.js index 5b93f45bbeb930..551f6f7ac30691 100644 --- a/test/parallel/test-process-execve-worker-threads.js +++ b/test/parallel/test-process-execve-worker-threads.js @@ -1,11 +1,11 @@ -const { isWindows, mustCall, skip } = require('../common'); +const { isWindows, isIBMi, mustCall, skip } = require('../common'); const { isMainThread, Worker } = require('worker_threads'); -if (isWindows) { +if (isWindows || isIBMi) { if (isMainThread) { diff --git a/test/parallel/test-process-execve.js b/test/parallel/test-process-execve.js index 1cc9bf87018497..b0d4bc05158f62 100644 --- a/test/parallel/test-process-execve.js +++ b/test/parallel/test-process-execve.js -const { isWindows, skip } = require('../common'); +const { isWindows, isIBMi, skip } = require('../common'); const { deepStrictEqual, fail, strictEqual } = require('assert');
[ "-const { mustNotCall, skip, isWindows } = require('../common');" ]
[ 86 ]
{ "additions": 28, "author": "abmusse", "deletions": 28, "html_url": "https://github.com/nodejs/node/pull/57883", "issue_id": 57883, "merged_at": "2025-04-18T11:07:51Z", "omission_probability": 0.1, "pr_number": 57883, "repo": "nodejs/node", "title": "process: disable building execve on IBM i", "total_changes": 56 }
862
diff --git a/test/fixtures/test-runner/output/coverage-width-80-color.snapshot b/test/fixtures/test-runner/output/coverage-width-80-color.snapshot index eb94b331a18001..8584d608fa43d8 100644 --- a/test/fixtures/test-runner/output/coverage-width-80-color.snapshot +++ b/test/fixtures/test-runner/output/coverage-width-80-color.snapshot @@ -20,7 +20,7 @@ ℹ output | [31m [34m | [31m [34m | [31m [34m | ℹ [32mcoverage-width-80-color.mjs [34m | [32m100.00[34m | [32m 100.00[34m | [32m 100.00[34m | ℹ ------------------------------------------------------------------------------ -ℹ all files | [33m 61.33[34m | [32m 100.00[34m | [31m 0.00[34m | +ℹ all files | [33m 62.34[34m | [32m 100.00[34m | [31m 0.00[34m | ℹ ------------------------------------------------------------------------------ ℹ end of coverage report [39m \ No newline at end of file diff --git a/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot b/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot index b9e56fca6586ac..abc9ed83bb6caf 100644 --- a/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot +++ b/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot @@ -21,7 +21,7 @@ ℹ output | [31m [34m | [31m [34m | [31m [34m | ℹ [32mcoverage-width-80-uncovered-lines-color.mjs [34m | [32m100.00[34m | [32m 100.00[34m | [32m 100.00[34m | ℹ -------------------------------------------------------------------------------------------------- -ℹ all files | [33m 52.91[34m | [33m 60.00[34m | [31m 1.61[34m | +ℹ all files | [33m 53.24[34m | [33m 60.00[34m | [31m 1.61[34m | ℹ -------------------------------------------------------------------------------------------------- ℹ end of coverage report [39m \ No newline at end of file diff --git a/test/parallel/test-runner-output.mjs b/test/parallel/test-runner-output.mjs index 4bb22fa2f39d6b..ab7391a757ef2a 100644 --- a/test/parallel/test-runner-output.mjs +++ 
b/test/parallel/test-runner-output.mjs @@ -1,3 +1,4 @@ +// Flags: --expose-internals import * as common from '../common/index.mjs'; import * as fixtures from '../common/fixtures.mjs'; import * as snapshot from '../common/assertSnapshot.js'; @@ -5,14 +6,13 @@ import { describe, it } from 'node:test'; import { hostname } from 'node:os'; import { chdir, cwd } from 'node:process'; import { fileURLToPath } from 'node:url'; +import internalTTy from 'internal/tty'; const skipForceColors = process.config.variables.icu_gyp_path !== 'tools/icu/icu-generic.gyp' || process.config.variables.node_shared_openssl; -const canColorize = process.stderr?.isTTY && ( - typeof process.stderr?.getColorDepth === 'function' ? - process.stderr?.getColorDepth() > 2 : true); +const canColorize = internalTTy.getColorDepth() > 2; const skipCoverageColors = !canColorize; function replaceTestDuration(str) {
diff --git a/test/fixtures/test-runner/output/coverage-width-80-color.snapshot b/test/fixtures/test-runner/output/coverage-width-80-color.snapshot index eb94b331a18001..8584d608fa43d8 100644 --- a/test/fixtures/test-runner/output/coverage-width-80-color.snapshot +++ b/test/fixtures/test-runner/output/coverage-width-80-color.snapshot @@ -20,7 +20,7 @@ ℹ output | [31m [34m | [31m [34m | [31m [34m | ℹ [32mcoverage-width-80-color.mjs [34m | [32m100.00[34m | [32m 100.00[34m | [32m 100.00[34m | -ℹ all files | [33m 61.33[34m | [32m 100.00[34m | [31m 0.00[34m | +ℹ all files | [33m 62.34[34m | [32m 100.00[34m | [31m 0.00[34m | diff --git a/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot b/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot index b9e56fca6586ac..abc9ed83bb6caf 100644 --- a/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot +++ b/test/fixtures/test-runner/output/coverage-width-80-uncovered-lines-color.snapshot @@ -21,7 +21,7 @@ ℹ output | [31m [34m | [31m [34m | [31m [34m | ℹ [32mcoverage-width-80-uncovered-lines-color.mjs [34m | [32m100.00[34m | [32m 100.00[34m | [32m 100.00[34m | -ℹ all files | [33m 52.91[34m | [33m 60.00[34m | [31m 1.61[34m | +ℹ all files | [33m 53.24[34m | [33m 60.00[34m | [31m 1.61[34m | diff --git a/test/parallel/test-runner-output.mjs b/test/parallel/test-runner-output.mjs index 4bb22fa2f39d6b..ab7391a757ef2a 100644 --- a/test/parallel/test-runner-output.mjs +++ b/test/parallel/test-runner-output.mjs @@ -1,3 +1,4 @@ +// Flags: --expose-internals import * as common from '../common/index.mjs'; import * as fixtures from '../common/fixtures.mjs'; import * as snapshot from '../common/assertSnapshot.js'; @@ -5,14 +6,13 @@ import { describe, it } from 'node:test'; import { hostname } from 'node:os'; import { chdir, cwd } from 'node:process'; import { fileURLToPath } from 'node:url'; +import internalTTy from 'internal/tty'; const skipForceColors = 
process.config.variables.icu_gyp_path !== 'tools/icu/icu-generic.gyp' || process.config.variables.node_shared_openssl; -const canColorize = process.stderr?.isTTY && ( - typeof process.stderr?.getColorDepth === 'function' ? - process.stderr?.getColorDepth() > 2 : true); +const canColorize = internalTTy.getColorDepth() > 2; const skipCoverageColors = !canColorize; function replaceTestDuration(str) {
[]
[]
{ "additions": 5, "author": "islandryu", "deletions": 5, "html_url": "https://github.com/nodejs/node/pull/57887", "issue_id": 57887, "merged_at": "2025-04-18T11:08:00Z", "omission_probability": 0.1, "pr_number": 57887, "repo": "nodejs/node", "title": "test: Enable skipped colorize test", "total_changes": 10 }
863
diff --git a/doc/api/inspector.md b/doc/api/inspector.md index a0852134511151..abb1b81ca699d3 100644 --- a/doc/api/inspector.md +++ b/doc/api/inspector.md @@ -490,6 +490,8 @@ An exception will be thrown if there is no active inspector. ## Integration with DevTools +> Stability: 1.1 - Active development + The `node:inspector` module provides an API for integrating with devtools that support Chrome DevTools Protocol. DevTools frontends connected to a running Node.js instance can capture protocol events emitted from the instance and display them accordingly to facilitate debugging. @@ -517,8 +519,6 @@ added: - v20.18.0 --> -> Stability: 1 - Experimental - * `params` {Object} This feature is only available with the `--experimental-network-inspection` flag enabled. @@ -534,8 +534,6 @@ added: - v20.18.0 --> -> Stability: 1 - Experimental - * `params` {Object} This feature is only available with the `--experimental-network-inspection` flag enabled. @@ -551,8 +549,6 @@ added: - v20.18.0 --> -> Stability: 1 - Experimental - * `params` {Object} This feature is only available with the `--experimental-network-inspection` flag enabled. @@ -568,8 +564,6 @@ added: - v20.18.0 --> -> Stability: 1 - Experimental - * `params` {Object} This feature is only available with the `--experimental-network-inspection` flag enabled.
diff --git a/doc/api/inspector.md b/doc/api/inspector.md index a0852134511151..abb1b81ca699d3 100644 --- a/doc/api/inspector.md +++ b/doc/api/inspector.md @@ -490,6 +490,8 @@ An exception will be thrown if there is no active inspector. ## Integration with DevTools +> Stability: 1.1 - Active development + The `node:inspector` module provides an API for integrating with devtools that support Chrome DevTools Protocol. DevTools frontends connected to a running Node.js instance can capture protocol events emitted from the instance and display them accordingly to facilitate debugging. @@ -517,8 +519,6 @@ added: @@ -534,8 +534,6 @@ added: @@ -551,8 +549,6 @@ added: @@ -568,8 +564,6 @@ added:
[]
[]
{ "additions": 2, "author": "legendecas", "deletions": 8, "html_url": "https://github.com/nodejs/node/pull/57886", "issue_id": 57886, "merged_at": "2025-04-17T19:38:58Z", "omission_probability": 0.1, "pr_number": 57886, "repo": "nodejs/node", "title": "doc: mark devtools integration section as active development", "total_changes": 10 }
864
diff --git a/src/api/hooks.cc b/src/api/hooks.cc index 54163a59f2f340..f74950ae3de219 100644 --- a/src/api/hooks.cc +++ b/src/api/hooks.cc @@ -196,6 +196,12 @@ async_id AsyncHooksGetExecutionAsyncId(Isolate* isolate) { return env->execution_async_id(); } +async_id AsyncHooksGetExecutionAsyncId(Local<Context> context) { + Environment* env = Environment::GetCurrent(context); + if (env == nullptr) return -1; + return env->execution_async_id(); +} + async_id AsyncHooksGetTriggerAsyncId(Isolate* isolate) { Environment* env = Environment::GetCurrent(isolate); if (env == nullptr) return -1; diff --git a/src/node.h b/src/node.h index d4930b27e7be29..c492185c56d520 100644 --- a/src/node.h +++ b/src/node.h @@ -1404,6 +1404,12 @@ NODE_EXTERN void RequestInterrupt(Environment* env, * I/O from native code. */ NODE_EXTERN async_id AsyncHooksGetExecutionAsyncId(v8::Isolate* isolate); +/* Returns the id of the current execution context. If the return value is + * zero then no execution has been set. This will happen if the user handles + * I/O from native code. 
*/ +NODE_EXTERN async_id +AsyncHooksGetExecutionAsyncId(v8::Local<v8::Context> context); + /* Return same value as async_hooks.triggerAsyncId(); */ NODE_EXTERN async_id AsyncHooksGetTriggerAsyncId(v8::Isolate* isolate); diff --git a/test/addons/async-hooks-id/binding.cc b/test/addons/async-hooks-id/binding.cc index e410563a8bb613..0eb643229c9011 100644 --- a/test/addons/async-hooks-id/binding.cc +++ b/test/addons/async-hooks-id/binding.cc @@ -12,6 +12,11 @@ void GetExecutionAsyncId(const FunctionCallbackInfo<Value>& args) { node::AsyncHooksGetExecutionAsyncId(args.GetIsolate())); } +void GetExecutionAsyncIdWithContext(const FunctionCallbackInfo<Value>& args) { + args.GetReturnValue().Set(node::AsyncHooksGetExecutionAsyncId( + args.GetIsolate()->GetCurrentContext())); +} + void GetTriggerAsyncId(const FunctionCallbackInfo<Value>& args) { args.GetReturnValue().Set( node::AsyncHooksGetTriggerAsyncId(args.GetIsolate())); @@ -19,6 +24,9 @@ void GetTriggerAsyncId(const FunctionCallbackInfo<Value>& args) { void Initialize(Local<Object> exports) { NODE_SET_METHOD(exports, "getExecutionAsyncId", GetExecutionAsyncId); + NODE_SET_METHOD(exports, + "getExecutionAsyncIdWithContext", + GetExecutionAsyncIdWithContext); NODE_SET_METHOD(exports, "getTriggerAsyncId", GetTriggerAsyncId); } diff --git a/test/addons/async-hooks-id/test.js b/test/addons/async-hooks-id/test.js index fd4a88c29f6076..5b0394ddcfb88e 100644 --- a/test/addons/async-hooks-id/test.js +++ b/test/addons/async-hooks-id/test.js @@ -9,6 +9,10 @@ assert.strictEqual( binding.getExecutionAsyncId(), async_hooks.executionAsyncId(), ); +assert.strictEqual( + binding.getExecutionAsyncIdWithContext(), + async_hooks.executionAsyncId(), +); assert.strictEqual( binding.getTriggerAsyncId(), async_hooks.triggerAsyncId(), @@ -19,6 +23,10 @@ process.nextTick(common.mustCall(() => { binding.getExecutionAsyncId(), async_hooks.executionAsyncId(), ); + assert.strictEqual( + binding.getExecutionAsyncIdWithContext(), + 
async_hooks.executionAsyncId(), + ); assert.strictEqual( binding.getTriggerAsyncId(), async_hooks.triggerAsyncId(),
diff --git a/src/api/hooks.cc b/src/api/hooks.cc index 54163a59f2f340..f74950ae3de219 100644 --- a/src/api/hooks.cc +++ b/src/api/hooks.cc @@ -196,6 +196,12 @@ async_id AsyncHooksGetExecutionAsyncId(Isolate* isolate) { return env->execution_async_id(); +async_id AsyncHooksGetExecutionAsyncId(Local<Context> context) { + Environment* env = Environment::GetCurrent(context); + if (env == nullptr) return -1; async_id AsyncHooksGetTriggerAsyncId(Isolate* isolate) { Environment* env = Environment::GetCurrent(isolate); if (env == nullptr) return -1; diff --git a/src/node.h b/src/node.h index d4930b27e7be29..c492185c56d520 100644 --- a/src/node.h +++ b/src/node.h @@ -1404,6 +1404,12 @@ NODE_EXTERN void RequestInterrupt(Environment* env, * I/O from native code. */ NODE_EXTERN async_id AsyncHooksGetExecutionAsyncId(v8::Isolate* isolate); +/* Returns the id of the current execution context. If the return value is + * I/O from native code. */ +AsyncHooksGetExecutionAsyncId(v8::Local<v8::Context> context); /* Return same value as async_hooks.triggerAsyncId(); */ NODE_EXTERN async_id AsyncHooksGetTriggerAsyncId(v8::Isolate* isolate); diff --git a/test/addons/async-hooks-id/binding.cc b/test/addons/async-hooks-id/binding.cc index e410563a8bb613..0eb643229c9011 100644 --- a/test/addons/async-hooks-id/binding.cc +++ b/test/addons/async-hooks-id/binding.cc @@ -12,6 +12,11 @@ void GetExecutionAsyncId(const FunctionCallbackInfo<Value>& args) { node::AsyncHooksGetExecutionAsyncId(args.GetIsolate())); +void GetExecutionAsyncIdWithContext(const FunctionCallbackInfo<Value>& args) { + args.GetReturnValue().Set(node::AsyncHooksGetExecutionAsyncId( + args.GetIsolate()->GetCurrentContext())); void GetTriggerAsyncId(const FunctionCallbackInfo<Value>& args) { args.GetReturnValue().Set( node::AsyncHooksGetTriggerAsyncId(args.GetIsolate())); @@ -19,6 +24,9 @@ void GetTriggerAsyncId(const FunctionCallbackInfo<Value>& args) { void Initialize(Local<Object> exports) { NODE_SET_METHOD(exports, 
"getExecutionAsyncId", GetExecutionAsyncId); NODE_SET_METHOD(exports, "getTriggerAsyncId", GetTriggerAsyncId); diff --git a/test/addons/async-hooks-id/test.js b/test/addons/async-hooks-id/test.js index fd4a88c29f6076..5b0394ddcfb88e 100644 --- a/test/addons/async-hooks-id/test.js +++ b/test/addons/async-hooks-id/test.js @@ -9,6 +9,10 @@ assert.strictEqual( binding.getExecutionAsyncId(), async_hooks.executionAsyncId(), ); +assert.strictEqual( + binding.getExecutionAsyncIdWithContext(), + async_hooks.executionAsyncId(), +); assert.strictEqual( binding.getTriggerAsyncId(), async_hooks.triggerAsyncId(), @@ -19,6 +23,10 @@ process.nextTick(common.mustCall(() => { binding.getExecutionAsyncId(), async_hooks.executionAsyncId(), ); + assert.strictEqual( + binding.getExecutionAsyncIdWithContext(), + async_hooks.executionAsyncId(), + ); assert.strictEqual( binding.getTriggerAsyncId(), async_hooks.triggerAsyncId(),
[ "+ return env->execution_async_id();", "+ * zero then no execution has been set. This will happen if the user handles", "+NODE_EXTERN async_id", "+ NODE_SET_METHOD(exports,", "+ \"getExecutionAsyncIdWithContext\",", "+ GetExecutionAsyncIdWithContext);" ]
[ 11, 26, 28, 54, 55, 56 ]
{ "additions": 28, "author": "szegedi", "deletions": 0, "html_url": "https://github.com/nodejs/node/pull/57820", "issue_id": 57820, "merged_at": "2025-04-17T18:00:07Z", "omission_probability": 0.1, "pr_number": 57820, "repo": "nodejs/node", "title": "src: add ExecutionAsyncId getter for any Context", "total_changes": 28 }
865
diff --git a/lib/internal/abort_controller.js b/lib/internal/abort_controller.js index 8ec9034a4f9352..4fbed3bfa7399a 100644 --- a/lib/internal/abort_controller.js +++ b/lib/internal/abort_controller.js @@ -259,14 +259,30 @@ class AbortSignal extends EventTarget { if (!signalsArray.length) { return resultSignal; } + const resultSignalWeakRef = new SafeWeakRef(resultSignal); resultSignal[kSourceSignals] = new SafeSet(); + + // Track if we have any timeout signals + let hasTimeoutSignals = false; + for (let i = 0; i < signalsArray.length; i++) { const signal = signalsArray[i]; + + // Check if this is a timeout signal + if (signal[kTimeout]) { + hasTimeoutSignals = true; + + // Add the timeout signal to gcPersistentSignals to keep it alive + // This is what the kNewListener method would do when adding abort listeners + gcPersistentSignals.add(signal); + } + if (signal.aborted) { abortSignal(resultSignal, signal.reason); return resultSignal; } + signal[kDependantSignals] ??= new SafeSet(); if (!signal[kComposite]) { const signalWeakRef = new SafeWeakRef(signal); @@ -301,6 +317,12 @@ class AbortSignal extends EventTarget { } } } + + // If we have any timeout signals, add the composite signal to gcPersistentSignals + if (hasTimeoutSignals && resultSignal[kSourceSignals].size > 0) { + gcPersistentSignals.add(resultSignal); + } + return resultSignal; } @@ -416,8 +438,10 @@ function abortSignal(signal, reason) { // otherwise to a new "AbortError" DOMException. signal[kAborted] = true; signal[kReason] = reason; + // 3. Let dependentSignalsToAbort be a new list. const dependentSignalsToAbort = ObjectSetPrototypeOf([], null); + // 4. For each dependentSignal of signal's dependent signals: signal[kDependantSignals]?.forEach((s) => { const dependentSignal = s.deref(); @@ -433,12 +457,27 @@ function abortSignal(signal, reason) { // 5. Run the abort steps for signal runAbort(signal); + // 6. 
For each dependentSignal of dependentSignalsToAbort, // run the abort steps for dependentSignal. for (let i = 0; i < dependentSignalsToAbort.length; i++) { const dependentSignal = dependentSignalsToAbort[i]; runAbort(dependentSignal); } + + // Clean up the signal from gcPersistentSignals + gcPersistentSignals.delete(signal); + + // If this is a composite signal, also remove all of its source signals from gcPersistentSignals + // when they get dereferenced from the signal's kSourceSignals set + if (signal[kComposite] && signal[kSourceSignals]) { + signal[kSourceSignals].forEach((sourceWeakRef) => { + const sourceSignal = sourceWeakRef.deref(); + if (sourceSignal) { + gcPersistentSignals.delete(sourceSignal); + } + }); + } } // To run the abort steps for an AbortSignal signal diff --git a/test/parallel/test-abort-controller-any-timeout.js b/test/parallel/test-abort-controller-any-timeout.js new file mode 100644 index 00000000000000..2d94afaa63d9b4 --- /dev/null +++ b/test/parallel/test-abort-controller-any-timeout.js @@ -0,0 +1,28 @@ +'use strict'; + +require('../common'); +const assert = require('assert'); +const { once } = require('node:events'); +const { describe, it } = require('node:test'); + +describe('AbortSignal.any() with timeout signals', () => { + it('should abort when the first timeout signal fires', async () => { + const signal = AbortSignal.any([AbortSignal.timeout(9000), AbortSignal.timeout(110000)]); + + const abortPromise = Promise.race([ + once(signal, 'abort').then(() => { + throw signal.reason; + }), + new Promise((resolve) => setTimeout(resolve, 10000)), + ]); + + // The promise should be aborted by the 9000ms timeout + await assert.rejects( + () => abortPromise, + { + name: 'TimeoutError', + message: 'The operation was aborted due to timeout' + } + ); + }); +});
diff --git a/lib/internal/abort_controller.js b/lib/internal/abort_controller.js index 8ec9034a4f9352..4fbed3bfa7399a 100644 --- a/lib/internal/abort_controller.js +++ b/lib/internal/abort_controller.js @@ -259,14 +259,30 @@ class AbortSignal extends EventTarget { if (!signalsArray.length) { return resultSignal; const resultSignalWeakRef = new SafeWeakRef(resultSignal); resultSignal[kSourceSignals] = new SafeSet(); + // Track if we have any timeout signals + let hasTimeoutSignals = false; for (let i = 0; i < signalsArray.length; i++) { const signal = signalsArray[i]; + // Check if this is a timeout signal + hasTimeoutSignals = true; + // Add the timeout signal to gcPersistentSignals to keep it alive + // This is what the kNewListener method would do when adding abort listeners + gcPersistentSignals.add(signal); if (signal.aborted) { abortSignal(resultSignal, signal.reason); return resultSignal; signal[kDependantSignals] ??= new SafeSet(); if (!signal[kComposite]) { const signalWeakRef = new SafeWeakRef(signal); @@ -301,6 +317,12 @@ class AbortSignal extends EventTarget { } + // If we have any timeout signals, add the composite signal to gcPersistentSignals + if (hasTimeoutSignals && resultSignal[kSourceSignals].size > 0) { + gcPersistentSignals.add(resultSignal); + } return resultSignal; @@ -416,8 +438,10 @@ function abortSignal(signal, reason) { // otherwise to a new "AbortError" DOMException. signal[kAborted] = true; signal[kReason] = reason; // 3. Let dependentSignalsToAbort be a new list. const dependentSignalsToAbort = ObjectSetPrototypeOf([], null); // 4. For each dependentSignal of signal's dependent signals: signal[kDependantSignals]?.forEach((s) => { const dependentSignal = s.deref(); @@ -433,12 +457,27 @@ function abortSignal(signal, reason) { // 5. Run the abort steps for signal runAbort(signal); // 6. For each dependentSignal of dependentSignalsToAbort, // run the abort steps for dependentSignal. 
for (let i = 0; i < dependentSignalsToAbort.length; i++) { const dependentSignal = dependentSignalsToAbort[i]; runAbort(dependentSignal); + // Clean up the signal from gcPersistentSignals + gcPersistentSignals.delete(signal); + // If this is a composite signal, also remove all of its source signals from gcPersistentSignals + // when they get dereferenced from the signal's kSourceSignals set + if (signal[kComposite] && signal[kSourceSignals]) { + signal[kSourceSignals].forEach((sourceWeakRef) => { + if (sourceSignal) { + gcPersistentSignals.delete(sourceSignal); + }); + } } // To run the abort steps for an AbortSignal signal diff --git a/test/parallel/test-abort-controller-any-timeout.js b/test/parallel/test-abort-controller-any-timeout.js new file mode 100644 index 00000000000000..2d94afaa63d9b4 --- /dev/null +++ b/test/parallel/test-abort-controller-any-timeout.js @@ -0,0 +1,28 @@ +'use strict'; +require('../common'); +const assert = require('assert'); +const { once } = require('node:events'); +const { describe, it } = require('node:test'); +describe('AbortSignal.any() with timeout signals', () => { + it('should abort when the first timeout signal fires', async () => { + const signal = AbortSignal.any([AbortSignal.timeout(9000), AbortSignal.timeout(110000)]); + const abortPromise = Promise.race([ + once(signal, 'abort').then(() => { + throw signal.reason; + }), + new Promise((resolve) => setTimeout(resolve, 10000)), + ]); + // The promise should be aborted by the 9000ms timeout + await assert.rejects( + () => abortPromise, + { + name: 'TimeoutError', + message: 'The operation was aborted due to timeout' +});
[ "+ if (signal[kTimeout]) {", "+ const sourceSignal = sourceWeakRef.deref();", "+ );", "+ });" ]
[ 19, 78, 118, 119 ]
{ "additions": 67, "author": "gurgunday", "deletions": 0, "html_url": "https://github.com/nodejs/node/pull/57867", "issue_id": 57867, "merged_at": "2025-04-17T14:28:37Z", "omission_probability": 0.1, "pr_number": 57867, "repo": "nodejs/node", "title": "lib: fix AbortSignal.any() with timeout signals", "total_changes": 67 }
866
diff --git a/src/node_options.cc b/src/node_options.cc index f96f9d2de9718e..a6dc00d7d7213f 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -53,19 +53,19 @@ void DebugOptions::CheckOptions(std::vector<std::string>* errors, "`node --inspect-brk` instead."); } - using std::string_view_literals::operator""sv; - const std::vector<std::string_view> destinations = - SplitString(inspect_publish_uid_string, ","sv); + using std::operator""sv; + auto entries = std::views::split(inspect_publish_uid_string, ","sv); inspect_publish_uid.console = false; inspect_publish_uid.http = false; - for (const std::string_view destination : destinations) { + for (const auto& entry : entries) { + std::string_view destination(entry.data(), entry.size()); if (destination == "stderr"sv) { inspect_publish_uid.console = true; } else if (destination == "http"sv) { inspect_publish_uid.http = true; } else { - errors->push_back("--inspect-publish-uid destination can be " - "stderr or http"); + errors->emplace_back("--inspect-publish-uid destination can be " + "stderr or http"); } } } diff --git a/src/node_v8_platform-inl.h b/src/node_v8_platform-inl.h index 2b06dc982e4a91..fc0922035aac07 100644 --- a/src/node_v8_platform-inl.h +++ b/src/node_v8_platform-inl.h @@ -128,19 +128,19 @@ struct V8Platform { inline void StartTracingAgent() { constexpr auto convert_to_set = - [](std::vector<std::string_view> categories) -> std::set<std::string> { + [](auto& categories) -> std::set<std::string> { std::set<std::string> out; for (const auto& s : categories) { - out.emplace(s); + out.emplace(std::string(s.data(), s.size())); } return out; }; // Attach a new NodeTraceWriter only if this function hasn't been called // before. 
if (tracing_file_writer_.IsDefaultHandle()) { - using std::string_view_literals::operator""sv; - const std::vector<std::string_view> categories = - SplitString(per_process::cli_options->trace_event_categories, ","sv); + using std::operator""sv; + auto categories = std::views::split( + per_process::cli_options->trace_event_categories, ","sv); tracing_file_writer_ = tracing_agent_->AddClient( convert_to_set(categories), diff --git a/src/util.cc b/src/util.cc index 8bf239db6e47d4..17afbec8be3b3c 100644 --- a/src/util.cc +++ b/src/util.cc @@ -220,24 +220,6 @@ std::string GetHumanReadableProcessName() { return SPrintF("%s[%d]", GetProcessTitle("Node.js"), uv_os_getpid()); } -std::vector<std::string_view> SplitString(const std::string_view in, - const std::string_view delim) { - std::vector<std::string_view> out; - - for (auto first = in.data(), second = in.data(), last = first + in.size(); - second != last && first != last; - first = second + 1) { - second = - std::find_first_of(first, last, std::cbegin(delim), std::cend(delim)); - - if (first != second) { - out.emplace_back(first, second - first); - } - } - - return out; -} - void ThrowErrStringTooLong(Isolate* isolate) { isolate->ThrowException(ERR_STRING_TOO_LONG(isolate)); } diff --git a/src/util.h b/src/util.h index 84583d80c598ba..6b414a79bbb613 100644 --- a/src/util.h +++ b/src/util.h @@ -715,8 +715,6 @@ using DeleteFnPtr = typename FunctionDeleter<T, function>::Pointer; inline v8::Maybe<void> FromV8Array(v8::Local<v8::Context> context, v8::Local<v8::Array> js_array, std::vector<v8::Global<v8::Value>>* out); -std::vector<std::string_view> SplitString(const std::string_view in, - const std::string_view delim); inline v8::MaybeLocal<v8::Value> ToV8Value(v8::Local<v8::Context> context, std::string_view str,
diff --git a/src/node_options.cc b/src/node_options.cc index f96f9d2de9718e..a6dc00d7d7213f 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -53,19 +53,19 @@ void DebugOptions::CheckOptions(std::vector<std::string>* errors, "`node --inspect-brk` instead."); - using std::string_view_literals::operator""sv; - const std::vector<std::string_view> destinations = - SplitString(inspect_publish_uid_string, ","sv); + using std::operator""sv; + auto entries = std::views::split(inspect_publish_uid_string, ","sv); inspect_publish_uid.console = false; inspect_publish_uid.http = false; - for (const std::string_view destination : destinations) { + std::string_view destination(entry.data(), entry.size()); if (destination == "stderr"sv) { inspect_publish_uid.console = true; } else if (destination == "http"sv) { inspect_publish_uid.http = true; } else { - errors->push_back("--inspect-publish-uid destination can be " - "stderr or http"); + errors->emplace_back("--inspect-publish-uid destination can be " + "stderr or http"); } diff --git a/src/node_v8_platform-inl.h b/src/node_v8_platform-inl.h index 2b06dc982e4a91..fc0922035aac07 100644 --- a/src/node_v8_platform-inl.h +++ b/src/node_v8_platform-inl.h @@ -128,19 +128,19 @@ struct V8Platform { inline void StartTracingAgent() { constexpr auto convert_to_set = - [](std::vector<std::string_view> categories) -> std::set<std::string> { std::set<std::string> out; for (const auto& s : categories) { - out.emplace(s); + out.emplace(std::string(s.data(), s.size())); } return out; }; // Attach a new NodeTraceWriter only if this function hasn't been called // before. 
if (tracing_file_writer_.IsDefaultHandle()) { - using std::string_view_literals::operator""sv; - const std::vector<std::string_view> categories = - SplitString(per_process::cli_options->trace_event_categories, ","sv); + using std::operator""sv; + auto categories = std::views::split( + per_process::cli_options->trace_event_categories, ","sv); tracing_file_writer_ = tracing_agent_->AddClient( convert_to_set(categories), diff --git a/src/util.cc b/src/util.cc index 8bf239db6e47d4..17afbec8be3b3c 100644 --- a/src/util.cc +++ b/src/util.cc @@ -220,24 +220,6 @@ std::string GetHumanReadableProcessName() { return SPrintF("%s[%d]", GetProcessTitle("Node.js"), uv_os_getpid()); - const std::string_view delim) { - std::vector<std::string_view> out; - for (auto first = in.data(), second = in.data(), last = first + in.size(); - second != last && first != last; - first = second + 1) { - second = - std::find_first_of(first, last, std::cbegin(delim), std::cend(delim)); - if (first != second) { - out.emplace_back(first, second - first); - } - return out; -} void ThrowErrStringTooLong(Isolate* isolate) { isolate->ThrowException(ERR_STRING_TOO_LONG(isolate)); diff --git a/src/util.h b/src/util.h index 84583d80c598ba..6b414a79bbb613 100644 --- a/src/util.h +++ b/src/util.h @@ -715,8 +715,6 @@ using DeleteFnPtr = typename FunctionDeleter<T, function>::Pointer; inline v8::Maybe<void> FromV8Array(v8::Local<v8::Context> context, v8::Local<v8::Array> js_array, std::vector<v8::Global<v8::Value>>* out); - const std::string_view delim); inline v8::MaybeLocal<v8::Value> ToV8Value(v8::Local<v8::Context> context, std::string_view str,
[ "+ for (const auto& entry : entries) {", "+ [](auto& categories) -> std::set<std::string> {", "- }" ]
[ 16, 39, 80 ]
{ "additions": 11, "author": "anonrig", "deletions": 31, "html_url": "https://github.com/nodejs/node/pull/54990", "issue_id": 54990, "merged_at": "2025-02-10T17:53:12Z", "omission_probability": 0.1, "pr_number": 54990, "repo": "nodejs/node", "title": "src: replace splitstring with built-in", "total_changes": 42 }
867
diff --git a/doc/api/util.md b/doc/api/util.md index d7511bdadfd421..99079a89115ff8 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -436,7 +436,7 @@ corresponding argument. Supported specifiers are: * `%s`: `String` will be used to convert all values except `BigInt`, `Object` and `-0`. `BigInt` values will be represented with an `n` and Objects that - have no user defined `toString` function are inspected using `util.inspect()` + have neither a user defined `toString` function nor `Symbol.toPrimitive` function are inspected using `util.inspect()` with options `{ depth: 0, colors: false, compact: 3 }`. * `%d`: `Number` will be used to convert all values except `BigInt` and `Symbol`. diff --git a/lib/internal/util/inspect.js b/lib/internal/util/inspect.js index f38eecba6ae5fb..976d77fa43b09c 100644 --- a/lib/internal/util/inspect.js +++ b/lib/internal/util/inspect.js @@ -2161,27 +2161,32 @@ function hasBuiltInToString(value) { value = proxyTarget; } - // Check if value has a custom Symbol.toPrimitive transformation. - if (typeof value[SymbolToPrimitive] === 'function') { - return false; - } + let hasOwnToString = ObjectPrototypeHasOwnProperty; + let hasOwnToPrimitive = ObjectPrototypeHasOwnProperty; - // Count objects that have no `toString` function as built-in. + // Count objects without `toString` and `Symbol.toPrimitive` function as built-in. if (typeof value.toString !== 'function') { - return true; - } - - // The object has a own `toString` property. Thus it's not not a built-in one. 
- if (ObjectPrototypeHasOwnProperty(value, 'toString')) { + if (typeof value[SymbolToPrimitive] !== 'function') { + return true; + } else if (ObjectPrototypeHasOwnProperty(value, SymbolToPrimitive)) { + return false; + } + hasOwnToString = returnFalse; + } else if (ObjectPrototypeHasOwnProperty(value, 'toString')) { + return false; + } else if (typeof value[SymbolToPrimitive] !== 'function') { + hasOwnToPrimitive = returnFalse; + } else if (ObjectPrototypeHasOwnProperty(value, SymbolToPrimitive)) { return false; } - // Find the object that has the `toString` property as own property in the - // prototype chain. + // Find the object that has the `toString` property or `Symbol.toPrimitive` property + // as own property in the prototype chain. let pointer = value; do { pointer = ObjectGetPrototypeOf(pointer); - } while (!ObjectPrototypeHasOwnProperty(pointer, 'toString')); + } while (!hasOwnToString(pointer, 'toString') && + !hasOwnToPrimitive(pointer, SymbolToPrimitive)); // Check closer if the object is a built-in. 
const descriptor = ObjectGetOwnPropertyDescriptor(pointer, 'constructor'); @@ -2190,6 +2195,10 @@ function hasBuiltInToString(value) { builtInObjects.has(descriptor.value.name); } +function returnFalse() { + return false; +} + const firstErrorLine = (error) => StringPrototypeSplit(error.message, '\n', 1)[0]; let CIRCULAR_ERROR_MESSAGE; function tryStringify(arg) { diff --git a/test/parallel/test-util-format.js b/test/parallel/test-util-format.js index 6f222d0fea0fb8..ad77c7cafd38fb 100644 --- a/test/parallel/test-util-format.js +++ b/test/parallel/test-util-format.js @@ -290,6 +290,68 @@ assert.strictEqual(util.format('%s', -Infinity), '-Infinity'); assert.strictEqual(util.format('%s', objectWithToPrimitive + ''), 'default context'); } +// built-in toPrimitive is the same behavior as inspect +{ + const date = new Date('2023-10-01T00:00:00Z'); + assert.strictEqual(util.format('%s', date), util.inspect(date)); + + const symbol = Symbol('foo'); + assert.strictEqual(util.format('%s', symbol), util.inspect(symbol)); +} + +// Prototype chain handling for toString +{ + function hasToStringButNoToPrimitive() {} + + hasToStringButNoToPrimitive.prototype.toString = function() { + return 'hasToStringButNoToPrimitive'; + }; + + let obj = new hasToStringButNoToPrimitive(); + assert.strictEqual(util.format('%s', obj.toString()), 'hasToStringButNoToPrimitive'); + + function inheritsFromHasToStringButNoToPrimitive() {} + Object.setPrototypeOf(inheritsFromHasToStringButNoToPrimitive.prototype, + hasToStringButNoToPrimitive.prototype); + obj = new inheritsFromHasToStringButNoToPrimitive(); + assert.strictEqual(util.format('%s', obj.toString()), 'hasToStringButNoToPrimitive'); +} + +// Prototype chain handling for Symbol.toPrimitive +{ + function hasToPrimitiveButNoToString() {} + + hasToPrimitiveButNoToString.prototype[Symbol.toPrimitive] = function() { + return 'hasToPrimitiveButNoToString'; + }; + + let obj = new hasToPrimitiveButNoToString(); + 
assert.strictEqual(util.format('%s', obj[Symbol.toPrimitive]()), 'hasToPrimitiveButNoToString'); + function inheritsFromHasToPrimitiveButNoToString() {} + Object.setPrototypeOf(inheritsFromHasToPrimitiveButNoToString.prototype, + hasToPrimitiveButNoToString.prototype); + obj = new inheritsFromHasToPrimitiveButNoToString(); + assert.strictEqual(util.format('%s', obj[Symbol.toPrimitive]()), 'hasToPrimitiveButNoToString'); +} + +// Prototype chain handling for both toString and Symbol.toPrimitive +{ + function hasBothToStringAndToPrimitive() {} + hasBothToStringAndToPrimitive.prototype.toString = function() { + return 'toString'; + }; + hasBothToStringAndToPrimitive.prototype[Symbol.toPrimitive] = function() { + return 'toPrimitive'; + }; + let obj = new hasBothToStringAndToPrimitive(); + assert.strictEqual(util.format('%s', obj.toString()), 'toString'); + function inheritsFromHasBothToStringAndToPrimitive() {} + Object.setPrototypeOf(inheritsFromHasBothToStringAndToPrimitive.prototype, + hasBothToStringAndToPrimitive.prototype); + obj = new inheritsFromHasBothToStringAndToPrimitive(); + assert.strictEqual(util.format('%s', obj.toString()), 'toString'); +} + // JSON format specifier assert.strictEqual(util.format('%j'), '%j'); assert.strictEqual(util.format('%j', 42), '42');
diff --git a/doc/api/util.md b/doc/api/util.md index d7511bdadfd421..99079a89115ff8 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -436,7 +436,7 @@ corresponding argument. Supported specifiers are: * `%s`: `String` will be used to convert all values except `BigInt`, `Object` and `-0`. `BigInt` values will be represented with an `n` and Objects that - have no user defined `toString` function are inspected using `util.inspect()` + have neither a user defined `toString` function nor `Symbol.toPrimitive` function are inspected using `util.inspect()` with options `{ depth: 0, colors: false, compact: 3 }`. * `%d`: `Number` will be used to convert all values except `BigInt` and `Symbol`. diff --git a/lib/internal/util/inspect.js b/lib/internal/util/inspect.js index f38eecba6ae5fb..976d77fa43b09c 100644 --- a/lib/internal/util/inspect.js +++ b/lib/internal/util/inspect.js @@ -2161,27 +2161,32 @@ function hasBuiltInToString(value) { value = proxyTarget; - // Check if value has a custom Symbol.toPrimitive transformation. - if (typeof value[SymbolToPrimitive] === 'function') { - return false; + let hasOwnToString = ObjectPrototypeHasOwnProperty; + let hasOwnToPrimitive = ObjectPrototypeHasOwnProperty; - // Count objects that have no `toString` function as built-in. + // Count objects without `toString` and `Symbol.toPrimitive` function as built-in. if (typeof value.toString !== 'function') { - return true; - - if (ObjectPrototypeHasOwnProperty(value, 'toString')) { + if (typeof value[SymbolToPrimitive] !== 'function') { + return true; + } else if (ObjectPrototypeHasOwnProperty(value, SymbolToPrimitive)) { + return false; + } + hasOwnToString = returnFalse; + } else if (ObjectPrototypeHasOwnProperty(value, 'toString')) { + return false; + hasOwnToPrimitive = returnFalse; return false; - // Find the object that has the `toString` property as own property in the - // prototype chain. 
+ // Find the object that has the `toString` property or `Symbol.toPrimitive` property + // as own property in the prototype chain. let pointer = value; do { pointer = ObjectGetPrototypeOf(pointer); - } while (!ObjectPrototypeHasOwnProperty(pointer, 'toString')); + } while (!hasOwnToString(pointer, 'toString') && + !hasOwnToPrimitive(pointer, SymbolToPrimitive)); // Check closer if the object is a built-in. const descriptor = ObjectGetOwnPropertyDescriptor(pointer, 'constructor'); @@ -2190,6 +2195,10 @@ function hasBuiltInToString(value) { builtInObjects.has(descriptor.value.name); +function returnFalse() { + return false; const firstErrorLine = (error) => StringPrototypeSplit(error.message, '\n', 1)[0]; let CIRCULAR_ERROR_MESSAGE; function tryStringify(arg) { diff --git a/test/parallel/test-util-format.js b/test/parallel/test-util-format.js index 6f222d0fea0fb8..ad77c7cafd38fb 100644 --- a/test/parallel/test-util-format.js +++ b/test/parallel/test-util-format.js @@ -290,6 +290,68 @@ assert.strictEqual(util.format('%s', -Infinity), '-Infinity'); assert.strictEqual(util.format('%s', objectWithToPrimitive + ''), 'default context'); +// built-in toPrimitive is the same behavior as inspect + const date = new Date('2023-10-01T00:00:00Z'); + assert.strictEqual(util.format('%s', date), util.inspect(date)); + const symbol = Symbol('foo'); + assert.strictEqual(util.format('%s', symbol), util.inspect(symbol)); +// Prototype chain handling for toString + return 'hasToStringButNoToPrimitive'; + let obj = new hasToStringButNoToPrimitive(); + function inheritsFromHasToStringButNoToPrimitive() {} + Object.setPrototypeOf(inheritsFromHasToStringButNoToPrimitive.prototype, + hasToStringButNoToPrimitive.prototype); + obj = new inheritsFromHasToStringButNoToPrimitive(); +// Prototype chain handling for Symbol.toPrimitive + function hasToPrimitiveButNoToString() {} + return 'hasToPrimitiveButNoToString'; + function inheritsFromHasToPrimitiveButNoToString() {} + 
Object.setPrototypeOf(inheritsFromHasToPrimitiveButNoToString.prototype, + hasToPrimitiveButNoToString.prototype); + obj = new inheritsFromHasToPrimitiveButNoToString(); +// Prototype chain handling for both toString and Symbol.toPrimitive + hasBothToStringAndToPrimitive.prototype.toString = function() { + return 'toString'; + return 'toPrimitive'; + function inheritsFromHasBothToStringAndToPrimitive() {} + Object.setPrototypeOf(inheritsFromHasBothToStringAndToPrimitive.prototype, + hasBothToStringAndToPrimitive.prototype); + obj = new inheritsFromHasBothToStringAndToPrimitive(); // JSON format specifier assert.strictEqual(util.format('%j'), '%j'); assert.strictEqual(util.format('%j', 42), '42');
[ "- // The object has a own `toString` property. Thus it's not not a built-in one.", "+ } else if (typeof value[SymbolToPrimitive] !== 'function') {", "+ } else if (ObjectPrototypeHasOwnProperty(value, SymbolToPrimitive)) {", "+ function hasToStringButNoToPrimitive() {}", "+ hasToStringButNoToPrimitive.prototype.toString = function() {", "+ hasToPrimitiveButNoToString.prototype[Symbol.toPrimitive] = function() {", "+ let obj = new hasToPrimitiveButNoToString();", "+ function hasBothToStringAndToPrimitive() {}", "+ hasBothToStringAndToPrimitive.prototype[Symbol.toPrimitive] = function() {", "+ let obj = new hasBothToStringAndToPrimitive();" ]
[ 34, 44, 46, 93, 95, 113, 117, 128, 132, 135 ]
{ "additions": 85, "author": "islandryu", "deletions": 14, "html_url": "https://github.com/nodejs/node/pull/57832", "issue_id": 57832, "merged_at": "2025-04-17T13:35:57Z", "omission_probability": 0.1, "pr_number": 57832, "repo": "nodejs/node", "title": "util: fix formatting of objects with built-in Symbol.toPrimitive", "total_changes": 99 }
868
diff --git a/lib/internal/crypto/random.js b/lib/internal/crypto/random.js index 80cf3224744ec7..a035dd265e05e0 100644 --- a/lib/internal/crypto/random.js +++ b/lib/internal/crypto/random.js @@ -57,6 +57,7 @@ const { isArrayBufferView, isAnyArrayBuffer, isTypedArray, + isFloat16Array, isFloat32Array, isFloat64Array, } = require('internal/util/types'); @@ -315,6 +316,7 @@ function onJobDone(buf, callback, error) { // be an integer-type TypedArray. function getRandomValues(data) { if (!isTypedArray(data) || + isFloat16Array(data) || isFloat32Array(data) || isFloat64Array(data)) { // Ordinarily this would be an ERR_INVALID_ARG_TYPE. However, diff --git a/test/wpt/status/WebCryptoAPI.cjs b/test/wpt/status/WebCryptoAPI.cjs index 0057d5f72cc937..709d34b8f47c40 100644 --- a/test/wpt/status/WebCryptoAPI.cjs +++ b/test/wpt/status/WebCryptoAPI.cjs @@ -11,14 +11,6 @@ module.exports = { 'historical.any.js': { 'skip': 'Not relevant in Node.js context', }, - 'getRandomValues.any.js': { - 'fail': { - 'note': 'Node.js does not support Float16Array', - 'expected': [ - 'Float16 arrays', - ], - }, - }, 'sign_verify/eddsa_small_order_points.https.any.js': { 'fail': { 'note': 'see https://github.com/nodejs/node/issues/54572', diff --git a/test/wpt/test-webcrypto.js b/test/wpt/test-webcrypto.js index 0d53a51901bbb9..06cd091df6a50a 100644 --- a/test/wpt/test-webcrypto.js +++ b/test/wpt/test-webcrypto.js @@ -1,3 +1,5 @@ +// Flags: --js-float16array +// TODO(LiviaMedeiros): once `Float16Array` is unflagged in v8, remove the line above 'use strict'; const common = require('../common');
diff --git a/lib/internal/crypto/random.js b/lib/internal/crypto/random.js index 80cf3224744ec7..a035dd265e05e0 100644 --- a/lib/internal/crypto/random.js +++ b/lib/internal/crypto/random.js @@ -57,6 +57,7 @@ const { isArrayBufferView, isAnyArrayBuffer, isTypedArray, isFloat32Array, isFloat64Array, } = require('internal/util/types'); @@ -315,6 +316,7 @@ function onJobDone(buf, callback, error) { // be an integer-type TypedArray. function getRandomValues(data) { if (!isTypedArray(data) || isFloat32Array(data) || isFloat64Array(data)) { // Ordinarily this would be an ERR_INVALID_ARG_TYPE. However, diff --git a/test/wpt/status/WebCryptoAPI.cjs b/test/wpt/status/WebCryptoAPI.cjs index 0057d5f72cc937..709d34b8f47c40 100644 --- a/test/wpt/status/WebCryptoAPI.cjs +++ b/test/wpt/status/WebCryptoAPI.cjs @@ -11,14 +11,6 @@ module.exports = { 'historical.any.js': { 'skip': 'Not relevant in Node.js context', }, - 'fail': { - 'note': 'Node.js does not support Float16Array', - 'expected': [ - 'Float16 arrays', - ], - }, - }, 'sign_verify/eddsa_small_order_points.https.any.js': { 'fail': { 'note': 'see https://github.com/nodejs/node/issues/54572', diff --git a/test/wpt/test-webcrypto.js b/test/wpt/test-webcrypto.js index 0d53a51901bbb9..06cd091df6a50a 100644 --- a/test/wpt/test-webcrypto.js +++ b/test/wpt/test-webcrypto.js @@ -1,3 +1,5 @@ +// Flags: --js-float16array +// TODO(LiviaMedeiros): once `Float16Array` is unflagged in v8, remove the line above 'use strict'; const common = require('../common');
[ "+ isFloat16Array,", "+ isFloat16Array(data) ||", "- 'getRandomValues.any.js': {" ]
[ 8, 16, 28 ]
{ "additions": 4, "author": "LiviaMedeiros", "deletions": 8, "html_url": "https://github.com/nodejs/node/pull/57880", "issue_id": 57880, "merged_at": "2025-04-17T07:26:53Z", "omission_probability": 0.1, "pr_number": 57880, "repo": "nodejs/node", "title": "crypto: forbid passing `Float16Array` to `getRandomValues()`", "total_changes": 12 }
869
diff --git a/doc/api/util.md b/doc/api/util.md index 5c8fd8a074c878..ef11b744c5db12 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -3102,6 +3102,23 @@ types.isExternal(new String('foo')); // returns false For further information on `napi_create_external`, refer to [`napi_create_external()`][]. +### `util.types.isFloat16Array(value)` + +<!-- YAML +added: REPLACEME +--> + +* `value` {any} +* Returns: {boolean} + +Returns `true` if the value is a built-in {Float16Array} instance. + +```js +util.types.isFloat16Array(new ArrayBuffer()); // Returns false +util.types.isFloat16Array(new Float16Array()); // Returns true +util.types.isFloat16Array(new Float32Array()); // Returns false +``` + ### `util.types.isFloat32Array(value)` <!-- YAML diff --git a/lib/internal/util/types.js b/lib/internal/util/types.js index e40700b38f81a9..393608331aa1f5 100644 --- a/lib/internal/util/types.js +++ b/lib/internal/util/types.js @@ -38,6 +38,10 @@ function isInt32Array(value) { return TypedArrayPrototypeGetSymbolToStringTag(value) === 'Int32Array'; } +function isFloat16Array(value) { + return TypedArrayPrototypeGetSymbolToStringTag(value) === 'Float16Array'; +} + function isFloat32Array(value) { return TypedArrayPrototypeGetSymbolToStringTag(value) === 'Float32Array'; } @@ -65,6 +69,7 @@ module.exports = { isInt8Array, isInt16Array, isInt32Array, + isFloat16Array, isFloat32Array, isFloat64Array, isBigInt64Array, diff --git a/test/parallel/test-util-types.js b/test/parallel/test-util-types.js index 04bc2298a0afbb..75b77a03e1ac81 100644 --- a/test/parallel/test-util-types.js +++ b/test/parallel/test-util-types.js @@ -1,4 +1,5 @@ -// Flags: --experimental-vm-modules --expose-internals --allow-natives-syntax +// Flags: --experimental-vm-modules --expose-internals --allow-natives-syntax --js-float16array +// TODO(LiviaMedeiros): once `Float16Array` is unflagged in v8, remove `--js-float16array` above 'use strict'; const common = require('../common'); const assert = require('assert'); @@ 
-9,6 +10,9 @@ const { JSStream } = internalBinding('js_stream'); const external = (new JSStream())._externalStream; +// TODO(LiviaMedeiros): once linter recognizes `Float16Array`, remove next line +const { Float16Array } = globalThis; + for (const [ value, _method ] of [ [ external, 'isExternal' ], [ new Date() ], @@ -38,6 +42,7 @@ for (const [ value, _method ] of [ [ new Int8Array() ], [ new Int16Array() ], [ new Int32Array() ], + [ new Float16Array() ], [ new Float32Array() ], [ new Float64Array() ], [ new BigInt64Array() ], @@ -102,6 +107,9 @@ for (const [ value, _method ] of [ assert(!types.isInt32Array({ [Symbol.toStringTag]: 'Int32Array' })); assert(types.isInt32Array(vm.runInNewContext('new Int32Array'))); + assert(!types.isFloat16Array({ [Symbol.toStringTag]: 'Float16Array' })); + assert(types.isFloat16Array(vm.runInNewContext('new Float16Array'))); + assert(!types.isFloat32Array({ [Symbol.toStringTag]: 'Float32Array' })); assert(types.isFloat32Array(vm.runInNewContext('new Float32Array'))); @@ -127,6 +135,7 @@ for (const [ value, _method ] of [ const int8Array = new Int8Array(arrayBuffer); const int16Array = new Int16Array(arrayBuffer); const int32Array = new Int32Array(arrayBuffer); + const float16Array = new Float16Array(arrayBuffer); const float32Array = new Float32Array(arrayBuffer); const float64Array = new Float64Array(arrayBuffer); const bigInt64Array = new BigInt64Array(arrayBuffer); @@ -141,6 +150,7 @@ for (const [ value, _method ] of [ const fakeInt8Array = { __proto__: Int8Array.prototype }; const fakeInt16Array = { __proto__: Int16Array.prototype }; const fakeInt32Array = { __proto__: Int32Array.prototype }; + const fakeFloat16Array = { __proto__: Float16Array.prototype }; const fakeFloat32Array = { __proto__: Float32Array.prototype }; const fakeFloat64Array = { __proto__: Float64Array.prototype }; const fakeBigInt64Array = { __proto__: BigInt64Array.prototype }; @@ -164,6 +174,10 @@ for (const [ value, _method ] of [ Object.setPrototypeOf(new 
Int16Array(arrayBuffer), Int16Array.prototype); const stealthyInt32Array = Object.setPrototypeOf(new Int32Array(arrayBuffer), Int32Array.prototype); + const stealthyFloat16Array = + Object.setPrototypeOf( + new Float16Array(arrayBuffer), Float16Array.prototype + ); const stealthyFloat32Array = Object.setPrototypeOf( new Float32Array(arrayBuffer), Float32Array.prototype @@ -191,6 +205,7 @@ for (const [ value, _method ] of [ int8Array, fakeInt8Array, stealthyInt8Array, int16Array, fakeInt16Array, stealthyInt16Array, int32Array, fakeInt32Array, stealthyInt32Array, + float16Array, fakeFloat16Array, stealthyFloat16Array, float32Array, fakeFloat32Array, stealthyFloat32Array, float64Array, fakeFloat64Array, stealthyFloat64Array, bigInt64Array, fakeBigInt64Array, stealthyBigInt64Array, @@ -208,6 +223,7 @@ for (const [ value, _method ] of [ int8Array, stealthyInt8Array, int16Array, stealthyInt16Array, int32Array, stealthyInt32Array, + float16Array, stealthyFloat16Array, float32Array, stealthyFloat32Array, float64Array, stealthyFloat64Array, bigInt64Array, stealthyBigInt64Array, @@ -222,6 +238,7 @@ for (const [ value, _method ] of [ int8Array, stealthyInt8Array, int16Array, stealthyInt16Array, int32Array, stealthyInt32Array, + float16Array, stealthyFloat16Array, float32Array, stealthyFloat32Array, float64Array, stealthyFloat64Array, bigInt64Array, stealthyBigInt64Array, @@ -248,6 +265,9 @@ for (const [ value, _method ] of [ isInt32Array: [ int32Array, stealthyInt32Array, ], + isFloat16Array: [ + float16Array, stealthyFloat16Array, + ], isFloat32Array: [ float32Array, stealthyFloat32Array, ], diff --git a/tools/doc/type-parser.mjs b/tools/doc/type-parser.mjs index 441847c75839aa..9485a85e4bbb70 100644 --- a/tools/doc/type-parser.mjs +++ b/tools/doc/type-parser.mjs @@ -19,7 +19,7 @@ const jsGlobalTypes = [ 'TypeError', 'URIError', 'WeakMap', 'WeakSet', 'TypedArray', - 'Float32Array', 'Float64Array', + 'Float16Array', 'Float32Array', 'Float64Array', 'Int8Array', 'Int16Array', 
'Int32Array', 'Uint8Array', 'Uint8ClampedArray', 'Uint16Array', 'Uint32Array', ];
diff --git a/doc/api/util.md b/doc/api/util.md index 5c8fd8a074c878..ef11b744c5db12 100644 --- a/doc/api/util.md +++ b/doc/api/util.md @@ -3102,6 +3102,23 @@ types.isExternal(new String('foo')); // returns false For further information on `napi_create_external`, refer to [`napi_create_external()`][]. +### `util.types.isFloat16Array(value)` +<!-- YAML +added: REPLACEME +--> +* `value` {any} +* Returns: {boolean} +Returns `true` if the value is a built-in {Float16Array} instance. +```js +util.types.isFloat16Array(new ArrayBuffer()); // Returns false +util.types.isFloat16Array(new Float16Array()); // Returns true +util.types.isFloat16Array(new Float32Array()); // Returns false +``` ### `util.types.isFloat32Array(value)` <!-- YAML diff --git a/lib/internal/util/types.js b/lib/internal/util/types.js index e40700b38f81a9..393608331aa1f5 100644 --- a/lib/internal/util/types.js +++ b/lib/internal/util/types.js @@ -38,6 +38,10 @@ function isInt32Array(value) { return TypedArrayPrototypeGetSymbolToStringTag(value) === 'Int32Array'; +function isFloat16Array(value) { + return TypedArrayPrototypeGetSymbolToStringTag(value) === 'Float16Array'; +} function isFloat32Array(value) { return TypedArrayPrototypeGetSymbolToStringTag(value) === 'Float32Array'; @@ -65,6 +69,7 @@ module.exports = { isInt8Array, isInt16Array, isInt32Array, + isFloat16Array, isFloat32Array, isFloat64Array, isBigInt64Array, diff --git a/test/parallel/test-util-types.js b/test/parallel/test-util-types.js index 04bc2298a0afbb..75b77a03e1ac81 100644 --- a/test/parallel/test-util-types.js +++ b/test/parallel/test-util-types.js @@ -1,4 +1,5 @@ -// Flags: --experimental-vm-modules --expose-internals --allow-natives-syntax +// Flags: --experimental-vm-modules --expose-internals --allow-natives-syntax --js-float16array 'use strict'; const common = require('../common'); const assert = require('assert'); @@ -9,6 +10,9 @@ const { JSStream } = internalBinding('js_stream'); const external = (new 
JSStream())._externalStream; +// TODO(LiviaMedeiros): once linter recognizes `Float16Array`, remove next line +const { Float16Array } = globalThis; for (const [ value, _method ] of [ [ external, 'isExternal' ], [ new Date() ], @@ -38,6 +42,7 @@ for (const [ value, _method ] of [ [ new Int8Array() ], [ new Int16Array() ], [ new Int32Array() ], + [ new Float16Array() ], [ new Float32Array() ], [ new Float64Array() ], [ new BigInt64Array() ], @@ -102,6 +107,9 @@ for (const [ value, _method ] of [ assert(!types.isInt32Array({ [Symbol.toStringTag]: 'Int32Array' })); assert(types.isInt32Array(vm.runInNewContext('new Int32Array'))); + assert(!types.isFloat16Array({ [Symbol.toStringTag]: 'Float16Array' })); + assert(types.isFloat16Array(vm.runInNewContext('new Float16Array'))); assert(!types.isFloat32Array({ [Symbol.toStringTag]: 'Float32Array' })); assert(types.isFloat32Array(vm.runInNewContext('new Float32Array'))); @@ -127,6 +135,7 @@ for (const [ value, _method ] of [ const int8Array = new Int8Array(arrayBuffer); const int16Array = new Int16Array(arrayBuffer); const int32Array = new Int32Array(arrayBuffer); + const float16Array = new Float16Array(arrayBuffer); const float32Array = new Float32Array(arrayBuffer); const float64Array = new Float64Array(arrayBuffer); const bigInt64Array = new BigInt64Array(arrayBuffer); @@ -141,6 +150,7 @@ for (const [ value, _method ] of [ const fakeInt8Array = { __proto__: Int8Array.prototype }; const fakeInt16Array = { __proto__: Int16Array.prototype }; const fakeInt32Array = { __proto__: Int32Array.prototype }; + const fakeFloat16Array = { __proto__: Float16Array.prototype }; const fakeFloat32Array = { __proto__: Float32Array.prototype }; const fakeFloat64Array = { __proto__: Float64Array.prototype }; const fakeBigInt64Array = { __proto__: BigInt64Array.prototype }; @@ -164,6 +174,10 @@ for (const [ value, _method ] of [ Object.setPrototypeOf(new Int16Array(arrayBuffer), Int16Array.prototype); const stealthyInt32Array = 
Object.setPrototypeOf(new Int32Array(arrayBuffer), Int32Array.prototype); + Object.setPrototypeOf( + new Float16Array(arrayBuffer), Float16Array.prototype + ); const stealthyFloat32Array = Object.setPrototypeOf( new Float32Array(arrayBuffer), Float32Array.prototype @@ -191,6 +205,7 @@ for (const [ value, _method ] of [ int8Array, fakeInt8Array, stealthyInt8Array, int16Array, fakeInt16Array, stealthyInt16Array, int32Array, fakeInt32Array, stealthyInt32Array, + float16Array, fakeFloat16Array, stealthyFloat16Array, float32Array, fakeFloat32Array, stealthyFloat32Array, float64Array, fakeFloat64Array, stealthyFloat64Array, bigInt64Array, fakeBigInt64Array, stealthyBigInt64Array, @@ -208,6 +223,7 @@ for (const [ value, _method ] of [ @@ -222,6 +238,7 @@ for (const [ value, _method ] of [ @@ -248,6 +265,9 @@ for (const [ value, _method ] of [ isInt32Array: [ + isFloat16Array: [ isFloat32Array: [ diff --git a/tools/doc/type-parser.mjs b/tools/doc/type-parser.mjs index 441847c75839aa..9485a85e4bbb70 100644 --- a/tools/doc/type-parser.mjs +++ b/tools/doc/type-parser.mjs @@ -19,7 +19,7 @@ const jsGlobalTypes = [ 'TypeError', 'URIError', 'WeakMap', 'WeakSet', 'TypedArray', - 'Float32Array', 'Float64Array', + 'Float16Array', 'Float32Array', 'Float64Array', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint8ClampedArray', 'Uint16Array', 'Uint32Array', ];
[ "+// TODO(LiviaMedeiros): once `Float16Array` is unflagged in v8, remove `--js-float16array` above", "+ const stealthyFloat16Array =", "+ ]," ]
[ 58, 110, 147 ]
{ "additions": 44, "author": "LiviaMedeiros", "deletions": 2, "html_url": "https://github.com/nodejs/node/pull/57879", "issue_id": 57879, "merged_at": "2025-04-16T22:09:03Z", "omission_probability": 0.1, "pr_number": 57879, "repo": "nodejs/node", "title": "util: add `types.isFloat16Array()`", "total_changes": 46 }
870
diff --git a/src/node_process_methods.cc b/src/node_process_methods.cc index 3866c612600642..5c3505f25ab94f 100644 --- a/src/node_process_methods.cc +++ b/src/node_process_methods.cc @@ -50,6 +50,7 @@ using v8::HeapStatistics; using v8::Integer; using v8::Isolate; using v8::Local; +using v8::LocalVector; using v8::Maybe; using v8::NewStringType; using v8::Number; @@ -289,7 +290,7 @@ static void Uptime(const FunctionCallbackInfo<Value>& args) { static void GetActiveRequests(const FunctionCallbackInfo<Value>& args) { Environment* env = Environment::GetCurrent(args); - std::vector<Local<Value>> request_v; + LocalVector<Value> request_v(env->isolate()); for (ReqWrapBase* req_wrap : *env->req_wrap_queue()) { AsyncWrap* w = req_wrap->GetAsyncWrap(); if (w->persistent().IsEmpty()) @@ -306,7 +307,7 @@ static void GetActiveRequests(const FunctionCallbackInfo<Value>& args) { void GetActiveHandles(const FunctionCallbackInfo<Value>& args) { Environment* env = Environment::GetCurrent(args); - std::vector<Local<Value>> handle_v; + LocalVector<Value> handle_v(env->isolate()); for (auto w : *env->handle_wrap_queue()) { if (!HandleWrap::HasRef(w)) continue; @@ -318,7 +319,7 @@ void GetActiveHandles(const FunctionCallbackInfo<Value>& args) { static void GetActiveResourcesInfo(const FunctionCallbackInfo<Value>& args) { Environment* env = Environment::GetCurrent(args); - std::vector<Local<Value>> resources_info; + LocalVector<Value> resources_info(env->isolate()); // Active requests for (ReqWrapBase* req_wrap : *env->req_wrap_queue()) { @@ -336,14 +337,17 @@ static void GetActiveResourcesInfo(const FunctionCallbackInfo<Value>& args) { } // Active timeouts - resources_info.insert(resources_info.end(), - env->timeout_info()[0], - FIXED_ONE_BYTE_STRING(env->isolate(), "Timeout")); + Local<Value> timeout_str = FIXED_ONE_BYTE_STRING(env->isolate(), "Timeout"); + for (int i = 0; i < env->timeout_info()[0]; ++i) { + resources_info.push_back(timeout_str); + } // Active immediates - 
resources_info.insert(resources_info.end(), - env->immediate_info()->ref_count(), - FIXED_ONE_BYTE_STRING(env->isolate(), "Immediate")); + Local<Value> immediate_str = + FIXED_ONE_BYTE_STRING(env->isolate(), "Immediate"); + for (uint32_t i = 0; i < env->immediate_info()->ref_count(); ++i) { + resources_info.push_back(immediate_str); + } args.GetReturnValue().Set( Array::New(env->isolate(), resources_info.data(), resources_info.size()));
diff --git a/src/node_process_methods.cc b/src/node_process_methods.cc index 3866c612600642..5c3505f25ab94f 100644 --- a/src/node_process_methods.cc +++ b/src/node_process_methods.cc @@ -50,6 +50,7 @@ using v8::HeapStatistics; using v8::Integer; using v8::Isolate; using v8::Local; +using v8::LocalVector; using v8::Maybe; using v8::NewStringType; using v8::Number; @@ -289,7 +290,7 @@ static void Uptime(const FunctionCallbackInfo<Value>& args) { static void GetActiveRequests(const FunctionCallbackInfo<Value>& args) { - std::vector<Local<Value>> request_v; + LocalVector<Value> request_v(env->isolate()); AsyncWrap* w = req_wrap->GetAsyncWrap(); if (w->persistent().IsEmpty()) @@ -306,7 +307,7 @@ static void GetActiveRequests(const FunctionCallbackInfo<Value>& args) { void GetActiveHandles(const FunctionCallbackInfo<Value>& args) { - std::vector<Local<Value>> handle_v; for (auto w : *env->handle_wrap_queue()) { if (!HandleWrap::HasRef(w)) continue; @@ -318,7 +319,7 @@ void GetActiveHandles(const FunctionCallbackInfo<Value>& args) { static void GetActiveResourcesInfo(const FunctionCallbackInfo<Value>& args) { - std::vector<Local<Value>> resources_info; + LocalVector<Value> resources_info(env->isolate()); // Active requests @@ -336,14 +337,17 @@ static void GetActiveResourcesInfo(const FunctionCallbackInfo<Value>& args) { } // Active timeouts - env->timeout_info()[0], - FIXED_ONE_BYTE_STRING(env->isolate(), "Timeout")); + Local<Value> timeout_str = FIXED_ONE_BYTE_STRING(env->isolate(), "Timeout"); + for (int i = 0; i < env->timeout_info()[0]; ++i) { + resources_info.push_back(timeout_str); // Active immediates - env->immediate_info()->ref_count(), + Local<Value> immediate_str = + FIXED_ONE_BYTE_STRING(env->isolate(), "Immediate"); args.GetReturnValue().Set( Array::New(env->isolate(), resources_info.data(), resources_info.size()));
[ "+ LocalVector<Value> handle_v(env->isolate());", "- FIXED_ONE_BYTE_STRING(env->isolate(), \"Immediate\"));", "+ for (uint32_t i = 0; i < env->immediate_info()->ref_count(); ++i) {", "+ resources_info.push_back(immediate_str);" ]
[ 26, 54, 57, 58 ]
{ "additions": 13, "author": "Aditi-1400", "deletions": 9, "html_url": "https://github.com/nodejs/node/pull/57646", "issue_id": 57646, "merged_at": "2025-04-16T16:59:07Z", "omission_probability": 0.1, "pr_number": 57646, "repo": "nodejs/node", "title": "Update std::vector<v8::Local<T>> to use v8::LocalVector<T> (Part 3)", "total_changes": 22 }
871
diff --git a/src/README.md b/src/README.md index c8c647e872da3d..4471dd7721ad8c 100644 --- a/src/README.md +++ b/src/README.md @@ -151,6 +151,25 @@ is done executing. `Local` handles can only be allocated on the C++ stack. Most of the V8 API uses `Local` handles to work with JavaScript values or return them from functions. +Additionally, according to [V8 public API documentation][`v8::Local<T>`], local handles +(`v8::Local<T>`) should **never** be allocated on the heap. + +This disallows heap-allocated data structures containing instances of `v8::Local` + +For example: + +```cpp +// Don't do this +std::vector<v8::Local<v8::Value>> v1; +``` + +Instead, it is recommended to use `v8::LocalVector<T>` provided by V8 +for such scenarios: + +```cpp +v8::LocalVector<v8::Value> v1(isolate); +``` + Whenever a `Local` handle is created, a `v8::HandleScope` or `v8::EscapableHandleScope` object must exist on the stack. The `Local` is then added to that scope and deleted along with it. @@ -1409,6 +1428,7 @@ static void GetUserInfo(const FunctionCallbackInfo<Value>& args) { [`v8.h` in Code Search]: https://cs.chromium.org/chromium/src/v8/include/v8.h [`v8.h` in Node.js]: https://github.com/nodejs/node/blob/HEAD/deps/v8/include/v8.h [`v8.h` in V8]: https://github.com/v8/v8/blob/HEAD/include/v8.h +[`v8::Local<T>`]: https://v8.github.io/api/head/classv8_1_1Local.html [`vm` module]: https://nodejs.org/api/vm.html [binding function]: #binding-functions [cleanup hooks]: #cleanup-hooks diff --git a/src/node_contextify.cc b/src/node_contextify.cc index af05a2ca3e9208..df44ec1f1dc6e1 100644 --- a/src/node_contextify.cc +++ b/src/node_contextify.cc @@ -1166,7 +1166,7 @@ Maybe<void> StoreCodeCacheResult( MaybeLocal<Function> CompileFunction(Local<Context> context, Local<String> filename, Local<String> content, - std::vector<Local<String>>* parameters) { + LocalVector<String>* parameters) { ScriptOrigin script_origin(filename, 0, 0, true); ScriptCompiler::Source script_source(content, 
script_origin); @@ -1483,7 +1483,7 @@ void ContextifyContext::CompileFunction( Context::Scope scope(parsing_context); // Read context extensions from buffer - std::vector<Local<Object>> context_extensions; + LocalVector<Object> context_extensions(isolate); if (!context_extensions_buf.IsEmpty()) { for (uint32_t n = 0; n < context_extensions_buf->Length(); n++) { Local<Value> val; @@ -1494,7 +1494,7 @@ void ContextifyContext::CompileFunction( } // Read params from params buffer - std::vector<Local<String>> params; + LocalVector<String> params(isolate); if (!params_buf.IsEmpty()) { for (uint32_t n = 0; n < params_buf->Length(); n++) { Local<Value> val; @@ -1526,22 +1526,24 @@ void ContextifyContext::CompileFunction( args.GetReturnValue().Set(result); } -static std::vector<Local<String>> GetCJSParameters(IsolateData* data) { - return { - data->exports_string(), - data->require_string(), - data->module_string(), - data->__filename_string(), - data->__dirname_string(), - }; +static LocalVector<String> GetCJSParameters(IsolateData* data) { + LocalVector<String> result(data->isolate(), + { + data->exports_string(), + data->require_string(), + data->module_string(), + data->__filename_string(), + data->__dirname_string(), + }); + return result; } Local<Object> ContextifyContext::CompileFunctionAndCacheResult( Environment* env, Local<Context> parsing_context, ScriptCompiler::Source* source, - std::vector<Local<String>> params, - std::vector<Local<Object>> context_extensions, + LocalVector<String> params, + LocalVector<Object> context_extensions, ScriptCompiler::CompileOptions options, bool produce_cached_data, Local<Symbol> id_symbol, @@ -1677,7 +1679,7 @@ static MaybeLocal<Function> CompileFunctionForCJSLoader( options = ScriptCompiler::kConsumeCodeCache; } - std::vector<Local<String>> params; + LocalVector<String> params(isolate); if (is_cjs_scope) { params = GetCJSParameters(env->isolate_data()); } diff --git a/src/node_contextify.h b/src/node_contextify.h index 
de69c22b0ebaed..ba964811bb6740 100644 --- a/src/node_contextify.h +++ b/src/node_contextify.h @@ -149,8 +149,8 @@ class ContextifyContext final : CPPGC_MIXIN(ContextifyContext) { Environment* env, v8::Local<v8::Context> parsing_context, v8::ScriptCompiler::Source* source, - std::vector<v8::Local<v8::String>> params, - std::vector<v8::Local<v8::Object>> context_extensions, + v8::LocalVector<v8::String> params, + v8::LocalVector<v8::Object> context_extensions, v8::ScriptCompiler::CompileOptions options, bool produce_cached_data, v8::Local<v8::Symbol> id_symbol, @@ -244,7 +244,7 @@ v8::MaybeLocal<v8::Function> CompileFunction( v8::Local<v8::Context> context, v8::Local<v8::String> filename, v8::Local<v8::String> content, - std::vector<v8::Local<v8::String>>* parameters); + v8::LocalVector<v8::String>* parameters); } // namespace contextify } // namespace node diff --git a/src/node_sea.cc b/src/node_sea.cc index fb9f933a19fa70..d8f2a65867f361 100644 --- a/src/node_sea.cc +++ b/src/node_sea.cc @@ -36,6 +36,7 @@ using v8::FunctionCallbackInfo; using v8::HandleScope; using v8::Isolate; using v8::Local; +using v8::LocalVector; using v8::MaybeLocal; using v8::NewStringType; using v8::Object; @@ -450,13 +451,15 @@ std::optional<std::string> GenerateCodeCache(std::string_view main_path, return std::nullopt; } - std::vector<Local<String>> parameters = { - FIXED_ONE_BYTE_STRING(isolate, "exports"), - FIXED_ONE_BYTE_STRING(isolate, "require"), - FIXED_ONE_BYTE_STRING(isolate, "module"), - FIXED_ONE_BYTE_STRING(isolate, "__filename"), - FIXED_ONE_BYTE_STRING(isolate, "__dirname"), - }; + LocalVector<String> parameters( + isolate, + { + FIXED_ONE_BYTE_STRING(isolate, "exports"), + FIXED_ONE_BYTE_STRING(isolate, "require"), + FIXED_ONE_BYTE_STRING(isolate, "module"), + FIXED_ONE_BYTE_STRING(isolate, "__filename"), + FIXED_ONE_BYTE_STRING(isolate, "__dirname"), + }); // TODO(RaisinTen): Using the V8 code cache prevents us from using `import()` // in the SEA code. Support it. 
diff --git a/src/node_snapshotable.cc b/src/node_snapshotable.cc index f9acb7b1d1618e..7207e68d127ae0 100644 --- a/src/node_snapshotable.cc +++ b/src/node_snapshotable.cc @@ -41,6 +41,7 @@ using v8::FunctionCallbackInfo; using v8::HandleScope; using v8::Isolate; using v8::Local; +using v8::LocalVector; using v8::Object; using v8::ObjectTemplate; using v8::SnapshotCreator; @@ -1479,11 +1480,13 @@ void CompileSerializeMain(const FunctionCallbackInfo<Value>& args) { Local<Context> context = isolate->GetCurrentContext(); // TODO(joyeecheung): do we need all of these? Maybe we would want a less // internal version of them. - std::vector<Local<String>> parameters = { - FIXED_ONE_BYTE_STRING(isolate, "require"), - FIXED_ONE_BYTE_STRING(isolate, "__filename"), - FIXED_ONE_BYTE_STRING(isolate, "__dirname"), - }; + LocalVector<String> parameters( + isolate, + { + FIXED_ONE_BYTE_STRING(isolate, "require"), + FIXED_ONE_BYTE_STRING(isolate, "__filename"), + FIXED_ONE_BYTE_STRING(isolate, "__dirname"), + }); Local<Function> fn; if (contextify::CompileFunction(context, filename, source, &parameters) .ToLocal(&fn)) {
diff --git a/src/README.md b/src/README.md index c8c647e872da3d..4471dd7721ad8c 100644 --- a/src/README.md +++ b/src/README.md @@ -151,6 +151,25 @@ is done executing. `Local` handles can only be allocated on the C++ stack. Most of the V8 API uses `Local` handles to work with JavaScript values or return them from functions. +Additionally, according to [V8 public API documentation][`v8::Local<T>`], local handles +(`v8::Local<T>`) should **never** be allocated on the heap. +This disallows heap-allocated data structures containing instances of `v8::Local` +For example: +// Don't do this +std::vector<v8::Local<v8::Value>> v1; +Instead, it is recommended to use `v8::LocalVector<T>` provided by V8 +for such scenarios: +v8::LocalVector<v8::Value> v1(isolate); Whenever a `Local` handle is created, a `v8::HandleScope` or `v8::EscapableHandleScope` object must exist on the stack. The `Local` is then added to that scope and deleted along with it. @@ -1409,6 +1428,7 @@ static void GetUserInfo(const FunctionCallbackInfo<Value>& args) { [`v8.h` in Code Search]: https://cs.chromium.org/chromium/src/v8/include/v8.h [`v8.h` in Node.js]: https://github.com/nodejs/node/blob/HEAD/deps/v8/include/v8.h [`v8.h` in V8]: https://github.com/v8/v8/blob/HEAD/include/v8.h +[`v8::Local<T>`]: https://v8.github.io/api/head/classv8_1_1Local.html [`vm` module]: https://nodejs.org/api/vm.html [binding function]: #binding-functions [cleanup hooks]: #cleanup-hooks diff --git a/src/node_contextify.cc b/src/node_contextify.cc index af05a2ca3e9208..df44ec1f1dc6e1 100644 --- a/src/node_contextify.cc +++ b/src/node_contextify.cc @@ -1166,7 +1166,7 @@ Maybe<void> StoreCodeCacheResult( MaybeLocal<Function> CompileFunction(Local<Context> context, Local<String> filename, Local<String> content, - std::vector<Local<String>>* parameters) { + LocalVector<String>* parameters) { ScriptOrigin script_origin(filename, 0, 0, true); ScriptCompiler::Source script_source(content, script_origin); @@ -1483,7 +1483,7 @@ void 
ContextifyContext::CompileFunction( Context::Scope scope(parsing_context); // Read context extensions from buffer - std::vector<Local<Object>> context_extensions; + LocalVector<Object> context_extensions(isolate); if (!context_extensions_buf.IsEmpty()) { for (uint32_t n = 0; n < context_extensions_buf->Length(); n++) { @@ -1494,7 +1494,7 @@ void ContextifyContext::CompileFunction( // Read params from params buffer if (!params_buf.IsEmpty()) { for (uint32_t n = 0; n < params_buf->Length(); n++) { @@ -1526,22 +1526,24 @@ void ContextifyContext::CompileFunction( args.GetReturnValue().Set(result); -static std::vector<Local<String>> GetCJSParameters(IsolateData* data) { - return { - data->exports_string(), - data->require_string(), - data->module_string(), - data->__filename_string(), - data->__dirname_string(), +static LocalVector<String> GetCJSParameters(IsolateData* data) { + { + data->exports_string(), + data->require_string(), + data->module_string(), + data->__filename_string(), + data->__dirname_string(), + }); + return result; Local<Object> ContextifyContext::CompileFunctionAndCacheResult( Environment* env, Local<Context> parsing_context, ScriptCompiler::Source* source, - std::vector<Local<String>> params, + LocalVector<String> params, + LocalVector<Object> context_extensions, ScriptCompiler::CompileOptions options, bool produce_cached_data, Local<Symbol> id_symbol, @@ -1677,7 +1679,7 @@ static MaybeLocal<Function> CompileFunctionForCJSLoader( options = ScriptCompiler::kConsumeCodeCache; if (is_cjs_scope) { params = GetCJSParameters(env->isolate_data()); diff --git a/src/node_contextify.h b/src/node_contextify.h index de69c22b0ebaed..ba964811bb6740 100644 --- a/src/node_contextify.h +++ b/src/node_contextify.h @@ -149,8 +149,8 @@ class ContextifyContext final : CPPGC_MIXIN(ContextifyContext) { Environment* env, v8::Local<v8::Context> parsing_context, v8::ScriptCompiler::Source* source, - std::vector<v8::Local<v8::String>> params, - 
std::vector<v8::Local<v8::Object>> context_extensions, + v8::LocalVector<v8::String> params, + v8::LocalVector<v8::Object> context_extensions, v8::ScriptCompiler::CompileOptions options, bool produce_cached_data, v8::Local<v8::Symbol> id_symbol, @@ -244,7 +244,7 @@ v8::MaybeLocal<v8::Function> CompileFunction( v8::Local<v8::Context> context, v8::Local<v8::String> filename, v8::Local<v8::String> content, - std::vector<v8::Local<v8::String>>* parameters); + v8::LocalVector<v8::String>* parameters); } // namespace contextify } // namespace node diff --git a/src/node_sea.cc b/src/node_sea.cc index fb9f933a19fa70..d8f2a65867f361 100644 --- a/src/node_sea.cc +++ b/src/node_sea.cc @@ -36,6 +36,7 @@ using v8::FunctionCallbackInfo; using v8::MaybeLocal; using v8::NewStringType; @@ -450,13 +451,15 @@ std::optional<std::string> GenerateCodeCache(std::string_view main_path, return std::nullopt; - FIXED_ONE_BYTE_STRING(isolate, "exports"), - FIXED_ONE_BYTE_STRING(isolate, "module"), + FIXED_ONE_BYTE_STRING(isolate, "exports"), + FIXED_ONE_BYTE_STRING(isolate, "module"), // TODO(RaisinTen): Using the V8 code cache prevents us from using `import()` // in the SEA code. Support it. diff --git a/src/node_snapshotable.cc b/src/node_snapshotable.cc index f9acb7b1d1618e..7207e68d127ae0 100644 --- a/src/node_snapshotable.cc +++ b/src/node_snapshotable.cc @@ -41,6 +41,7 @@ using v8::FunctionCallbackInfo; using v8::ObjectTemplate; using v8::SnapshotCreator; @@ -1479,11 +1480,13 @@ void CompileSerializeMain(const FunctionCallbackInfo<Value>& args) { Local<Context> context = isolate->GetCurrentContext(); // TODO(joyeecheung): do we need all of these? Maybe we would want a less // internal version of them. Local<Function> fn; if (contextify::CompileFunction(context, filename, source, &parameters) .ToLocal(&fn)) {
[ "+ LocalVector<String> result(data->isolate(),", "- std::vector<Local<Object>> context_extensions," ]
[ 82, 98 ]
{ "additions": 57, "author": "Aditi-1400", "deletions": 29, "html_url": "https://github.com/nodejs/node/pull/57642", "issue_id": 57642, "merged_at": "2025-04-16T16:58:56Z", "omission_probability": 0.1, "pr_number": 57642, "repo": "nodejs/node", "title": "Update std::vector<v8::Local<T>> to use v8::LocalVector<T> (Part 2)", "total_changes": 86 }
872
diff --git a/src/node_builtins.cc b/src/node_builtins.cc index d619379ad07df2..f7a6f84d5758fb 100644 --- a/src/node_builtins.cc +++ b/src/node_builtins.cc @@ -17,6 +17,7 @@ using v8::FunctionCallbackInfo; using v8::IntegrityLevel; using v8::Isolate; using v8::Local; +using v8::LocalVector; using v8::MaybeLocal; using v8::Name; using v8::None; @@ -258,7 +259,7 @@ void BuiltinLoader::AddExternalizedBuiltin(const char* id, MaybeLocal<Function> BuiltinLoader::LookupAndCompileInternal( Local<Context> context, const char* id, - std::vector<Local<String>>* parameters, + LocalVector<String>* parameters, Realm* optional_realm) { Isolate* isolate = context->GetIsolate(); EscapableHandleScope scope(isolate); @@ -382,8 +383,8 @@ void BuiltinLoader::SaveCodeCache(const char* id, Local<Function> fun) { MaybeLocal<Function> BuiltinLoader::LookupAndCompile(Local<Context> context, const char* id, Realm* optional_realm) { - std::vector<Local<String>> parameters; Isolate* isolate = context->GetIsolate(); + LocalVector<String> parameters(isolate); // Detects parameters of the scripts based on module ids. 
// internal/bootstrap/realm: process, getLinkedBinding, // getInternalBinding, primordials @@ -497,7 +498,7 @@ MaybeLocal<Value> BuiltinLoader::CompileAndCall(Local<Context> context, MaybeLocal<Function> BuiltinLoader::LookupAndCompile( Local<Context> context, const char* id, - std::vector<Local<String>>* parameters, + LocalVector<String>* parameters, Realm* optional_realm) { return LookupAndCompileInternal(context, id, parameters, optional_realm); } diff --git a/src/node_builtins.h b/src/node_builtins.h index 196934b706b9dd..83ddad2127227d 100644 --- a/src/node_builtins.h +++ b/src/node_builtins.h @@ -101,7 +101,7 @@ class NODE_EXTERN_PRIVATE BuiltinLoader { v8::MaybeLocal<v8::Function> LookupAndCompile( v8::Local<v8::Context> context, const char* id, - std::vector<v8::Local<v8::String>>* parameters, + v8::LocalVector<v8::String>* parameters, Realm* optional_realm); v8::MaybeLocal<v8::Value> CompileAndCall(v8::Local<v8::Context> context, @@ -159,7 +159,7 @@ class NODE_EXTERN_PRIVATE BuiltinLoader { v8::MaybeLocal<v8::Function> LookupAndCompileInternal( v8::Local<v8::Context> context, const char* id, - std::vector<v8::Local<v8::String>>* parameters, + v8::LocalVector<v8::String>* parameters, Realm* optional_realm); void SaveCodeCache(const char* id, v8::Local<v8::Function> fn);
diff --git a/src/node_builtins.cc b/src/node_builtins.cc index d619379ad07df2..f7a6f84d5758fb 100644 --- a/src/node_builtins.cc +++ b/src/node_builtins.cc @@ -17,6 +17,7 @@ using v8::FunctionCallbackInfo; using v8::IntegrityLevel; using v8::Isolate; using v8::Local; +using v8::LocalVector; using v8::MaybeLocal; using v8::Name; using v8::None; @@ -258,7 +259,7 @@ void BuiltinLoader::AddExternalizedBuiltin(const char* id, MaybeLocal<Function> BuiltinLoader::LookupAndCompileInternal( EscapableHandleScope scope(isolate); @@ -382,8 +383,8 @@ void BuiltinLoader::SaveCodeCache(const char* id, Local<Function> fun) { MaybeLocal<Function> BuiltinLoader::LookupAndCompile(Local<Context> context, const char* id, Realm* optional_realm) { - std::vector<Local<String>> parameters; + LocalVector<String> parameters(isolate); // Detects parameters of the scripts based on module ids. // internal/bootstrap/realm: process, getLinkedBinding, // getInternalBinding, primordials @@ -497,7 +498,7 @@ MaybeLocal<Value> BuiltinLoader::CompileAndCall(Local<Context> context, MaybeLocal<Function> BuiltinLoader::LookupAndCompile( return LookupAndCompileInternal(context, id, parameters, optional_realm); } diff --git a/src/node_builtins.h b/src/node_builtins.h index 196934b706b9dd..83ddad2127227d 100644 --- a/src/node_builtins.h +++ b/src/node_builtins.h @@ -101,7 +101,7 @@ class NODE_EXTERN_PRIVATE BuiltinLoader { v8::MaybeLocal<v8::Function> LookupAndCompile( v8::MaybeLocal<v8::Value> CompileAndCall(v8::Local<v8::Context> context, @@ -159,7 +159,7 @@ class NODE_EXTERN_PRIVATE BuiltinLoader { v8::MaybeLocal<v8::Function> LookupAndCompileInternal( void SaveCodeCache(const char* id, v8::Local<v8::Function> fn);
[]
[]
{ "additions": 6, "author": "Aditi-1400", "deletions": 5, "html_url": "https://github.com/nodejs/node/pull/57578", "issue_id": 57578, "merged_at": "2025-04-16T16:58:45Z", "omission_probability": 0.1, "pr_number": 57578, "repo": "nodejs/node", "title": "src: update std::vector<v8::Local<T>> to use v8::LocalVector<T>", "total_changes": 11 }
873
diff --git a/lib/internal/test_runner/runner.js b/lib/internal/test_runner/runner.js index 733a745a494162..7943917a08840e 100644 --- a/lib/internal/test_runner/runner.js +++ b/lib/internal/test_runner/runner.js @@ -502,7 +502,10 @@ function watchFiles(testFiles, opts) { return; // Avoid rerunning files when file deleted } } - + // Reset the root start time to recalculate the duration + // of the run + opts.root.clearExecutionTime(); + // Restart test files if (opts.isolation === 'none') { PromisePrototypeThen(restartTestFile(kIsolatedProcessName), undefined, (error) => { triggerUncaughtException(error, true /* fromPromise */); diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index bdeeddd0892c1b..fae26494ef9125 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -1335,6 +1335,11 @@ class Test extends AsyncResource { this.parent.reportStarted(); this.reporter.start(this.nesting, this.loc, this.name); } + + clearExecutionTime() { + this.startTime = hrtime(); + this.endTime = null; + } } class TestHook extends Test { diff --git a/test/parallel/test-runner-run-watch.mjs b/test/parallel/test-runner-run-watch.mjs index 83e8524f44a338..739cc29db6365e 100644 --- a/test/parallel/test-runner-run-watch.mjs +++ b/test/parallel/test-runner-run-watch.mjs @@ -189,6 +189,8 @@ async function testWatch( action === 'rename2' && await testRename(); action === 'delete' && await testDelete(); action === 'create' && await testCreate(); + + return runs; } describe('test runner watch mode', () => { @@ -241,6 +243,20 @@ describe('test runner watch mode', () => { await testWatch({ action: 'create', fileToCreate: 'new-test-file.test.js' }); }); + // This test is flaky by its nature as it relies on the timing of 2 different runs + // considering the number of digits in the duration_ms is 9 + // the chances of having the same duration_ms are very low + // but not impossible + // In case of costant failures, consider increasing 
the number of tests + it('should recalculate the run duration on a watch restart', async () => { + const testRuns = await testWatch({ file: 'test.js', fileToUpdate: 'test.js' }); + const durations = testRuns.map((run) => { + const runDuration = run.match(/# duration_ms\s([\d.]+)/); + return runDuration; + }); + assert.notDeepStrictEqual(durations[0][1], durations[1][1]); + }); + describe('test runner watch mode with different cwd', () => { it( 'should execute run using a different cwd for the runner than the process cwd',
diff --git a/lib/internal/test_runner/runner.js b/lib/internal/test_runner/runner.js index 733a745a494162..7943917a08840e 100644 --- a/lib/internal/test_runner/runner.js +++ b/lib/internal/test_runner/runner.js @@ -502,7 +502,10 @@ function watchFiles(testFiles, opts) { return; // Avoid rerunning files when file deleted } } - + // Reset the root start time to recalculate the duration + // of the run + opts.root.clearExecutionTime(); + // Restart test files if (opts.isolation === 'none') { PromisePrototypeThen(restartTestFile(kIsolatedProcessName), undefined, (error) => { triggerUncaughtException(error, true /* fromPromise */); diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index bdeeddd0892c1b..fae26494ef9125 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -1335,6 +1335,11 @@ class Test extends AsyncResource { this.parent.reportStarted(); this.reporter.start(this.nesting, this.loc, this.name); } + clearExecutionTime() { + this.startTime = hrtime(); + this.endTime = null; + } class TestHook extends Test { diff --git a/test/parallel/test-runner-run-watch.mjs b/test/parallel/test-runner-run-watch.mjs index 83e8524f44a338..739cc29db6365e 100644 --- a/test/parallel/test-runner-run-watch.mjs +++ b/test/parallel/test-runner-run-watch.mjs @@ -189,6 +189,8 @@ async function testWatch( action === 'rename2' && await testRename(); action === 'delete' && await testDelete(); action === 'create' && await testCreate(); + return runs; describe('test runner watch mode', () => { @@ -241,6 +243,20 @@ describe('test runner watch mode', () => { await testWatch({ action: 'create', fileToCreate: 'new-test-file.test.js' }); }); + // This test is flaky by its nature as it relies on the timing of 2 different runs + // considering the number of digits in the duration_ms is 9 + // the chances of having the same duration_ms are very low + // but not impossible + // In case of costant failures, consider increasing the 
number of tests + it('should recalculate the run duration on a watch restart', async () => { + const durations = testRuns.map((run) => { + const runDuration = run.match(/# duration_ms\s([\d.]+)/); + return runDuration; + }); + assert.notDeepStrictEqual(durations[0][1], durations[1][1]); + }); describe('test runner watch mode with different cwd', () => { it( 'should execute run using a different cwd for the runner than the process cwd',
[ "+ const testRuns = await testWatch({ file: 'test.js', fileToUpdate: 'test.js' });" ]
[ 55 ]
{ "additions": 25, "author": "pmarchini", "deletions": 1, "html_url": "https://github.com/nodejs/node/pull/57786", "issue_id": 57786, "merged_at": "2025-04-16T15:38:50Z", "omission_probability": 0.1, "pr_number": 57786, "repo": "nodejs/node", "title": "test_runner: recalculate run duration on watch restart", "total_changes": 26 }
874
diff --git a/lib/internal/readline/interface.js b/lib/internal/readline/interface.js index fe89598c523da8..429aa41d4bfabf 100644 --- a/lib/internal/readline/interface.js +++ b/lib/internal/readline/interface.js @@ -467,9 +467,15 @@ class Interface extends InterfaceConstructor { } // Convert newlines to a consistent format for history storage - [kNormalizeHistoryLineEndings](line, from, to) { + [kNormalizeHistoryLineEndings](line, from, to, reverse = true) { // Multiline history entries are saved reversed - if (StringPrototypeIncludes(line, '\r')) { + // History is structured with the newest entries at the top + // and the oldest at the bottom. Multiline histories, however, only occupy + // one line in the history file. When loading multiline history with + // an old node binary, the history will be saved in the old format. + // This is why we need to reverse the multilines. + // Reversing the multilines is necessary when adding / editing and displaying them + if (reverse) { // First reverse the lines for proper order, then convert separators return ArrayPrototypeJoin( ArrayPrototypeReverse(StringPrototypeSplit(line, from)), @@ -488,7 +494,7 @@ class Interface extends InterfaceConstructor { // If the trimmed line is empty then return the line if (StringPrototypeTrim(this.line).length === 0) return this.line; - const normalizedLine = this[kNormalizeHistoryLineEndings](this.line, '\n', '\r'); + const normalizedLine = this[kNormalizeHistoryLineEndings](this.line, '\n', '\r', false); if (this.history.length === 0 || this.history[0] !== normalizedLine) { if (this[kLastCommandErrored] && this.historyIndex === 0) { diff --git a/test/parallel/test-repl-multiline-navigation.js b/test/parallel/test-repl-multiline-navigation.js index f8bdcc842d595e..75e8833768b7c2 100644 --- a/test/parallel/test-repl-multiline-navigation.js +++ b/test/parallel/test-repl-multiline-navigation.js @@ -7,7 +7,6 @@ const common = require('../common'); const assert = require('assert'); const repl = 
require('internal/repl'); const stream = require('stream'); -const fs = require('fs'); class ActionStream extends stream.Stream { run(data) { @@ -42,23 +41,11 @@ class ActionStream extends stream.Stream { } ActionStream.prototype.readable = true; -function cleanupTmpFile() { - try { - // Write over the file, clearing any history - fs.writeFileSync(defaultHistoryPath, ''); - } catch (err) { - if (err.code === 'ENOENT') return true; - throw err; - } - return true; -} - const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); -const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); { - cleanupTmpFile(); + const historyPath = tmpdir.resolve(`.${Math.floor(Math.random() * 10000)}`); // Make sure the cursor is at the right places. // If the cursor is at the end of a long line and the down key is pressed, // Move the cursor to the end of the next line, if shorter. @@ -97,7 +84,7 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); }); repl.createInternalRepl( - { NODE_REPL_HISTORY: defaultHistoryPath }, + { NODE_REPL_HISTORY: historyPath }, { terminal: true, input: new ActionStream(), @@ -112,7 +99,7 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); } { - cleanupTmpFile(); + const historyPath = tmpdir.resolve(`.${Math.floor(Math.random() * 10000)}`); // If the last command errored and the user is trying to edit it, // The errored line should be removed from history const checkResults = common.mustSucceed((r) => { @@ -130,12 +117,17 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); r.input.run([{ name: 'enter' }]); assert.strictEqual(r.history.length, 1); - assert.strictEqual(r.history[0], 'let lineWithMistake = `I have some\rproblem with my syntax`'); + // Check that the line is properly set in the history structure + assert.strictEqual(r.history[0], 'problem with my syntax`\rlet lineWithMistake = `I have some'); assert.strictEqual(r.line, ''); + + r.input.run([{ name: 'up' }]); + // Check that the line is 
properly displayed + assert.strictEqual(r.line, 'let lineWithMistake = `I have some\nproblem with my syntax`'); }); repl.createInternalRepl( - { NODE_REPL_HISTORY: defaultHistoryPath }, + { NODE_REPL_HISTORY: historyPath }, { terminal: true, input: new ActionStream(), @@ -150,7 +142,7 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); } { - cleanupTmpFile(); + const historyPath = tmpdir.resolve(`.${Math.floor(Math.random() * 10000)}`); const outputBuffer = []; // Test that the REPL preview is properly shown on multiline commands @@ -182,7 +174,7 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); }); repl.createInternalRepl( - { NODE_REPL_HISTORY: defaultHistoryPath }, + { NODE_REPL_HISTORY: historyPath }, { preview: true, terminal: true,
diff --git a/lib/internal/readline/interface.js b/lib/internal/readline/interface.js index fe89598c523da8..429aa41d4bfabf 100644 --- a/lib/internal/readline/interface.js +++ b/lib/internal/readline/interface.js @@ -467,9 +467,15 @@ class Interface extends InterfaceConstructor { } // Convert newlines to a consistent format for history storage - [kNormalizeHistoryLineEndings](line, from, to) { + [kNormalizeHistoryLineEndings](line, from, to, reverse = true) { // Multiline history entries are saved reversed - if (StringPrototypeIncludes(line, '\r')) { + // History is structured with the newest entries at the top + // and the oldest at the bottom. Multiline histories, however, only occupy + // one line in the history file. When loading multiline history with + // an old node binary, the history will be saved in the old format. + // This is why we need to reverse the multilines. + // Reversing the multilines is necessary when adding / editing and displaying them + if (reverse) { // First reverse the lines for proper order, then convert separators return ArrayPrototypeJoin( ArrayPrototypeReverse(StringPrototypeSplit(line, from)), @@ -488,7 +494,7 @@ class Interface extends InterfaceConstructor { // If the trimmed line is empty then return the line if (StringPrototypeTrim(this.line).length === 0) return this.line; - const normalizedLine = this[kNormalizeHistoryLineEndings](this.line, '\n', '\r'); + const normalizedLine = this[kNormalizeHistoryLineEndings](this.line, '\n', '\r', false); if (this.history.length === 0 || this.history[0] !== normalizedLine) { if (this[kLastCommandErrored] && this.historyIndex === 0) { diff --git a/test/parallel/test-repl-multiline-navigation.js b/test/parallel/test-repl-multiline-navigation.js index f8bdcc842d595e..75e8833768b7c2 100644 --- a/test/parallel/test-repl-multiline-navigation.js +++ b/test/parallel/test-repl-multiline-navigation.js @@ -7,7 +7,6 @@ const common = require('../common'); const assert = require('assert'); const repl = 
require('internal/repl'); const stream = require('stream'); -const fs = require('fs'); class ActionStream extends stream.Stream { run(data) { @@ -42,23 +41,11 @@ class ActionStream extends stream.Stream { ActionStream.prototype.readable = true; -function cleanupTmpFile() { - try { - // Write over the file, clearing any history - fs.writeFileSync(defaultHistoryPath, ''); - } catch (err) { - if (err.code === 'ENOENT') return true; - throw err; - } - return true; -} - const tmpdir = require('../common/tmpdir'); tmpdir.refresh(); -const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); // Make sure the cursor is at the right places. // If the cursor is at the end of a long line and the down key is pressed, // Move the cursor to the end of the next line, if shorter. @@ -97,7 +84,7 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); @@ -112,7 +99,7 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); // If the last command errored and the user is trying to edit it, // The errored line should be removed from history const checkResults = common.mustSucceed((r) => { @@ -130,12 +117,17 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); r.input.run([{ name: 'enter' }]); assert.strictEqual(r.history.length, 1); - assert.strictEqual(r.history[0], 'let lineWithMistake = `I have some\rproblem with my syntax`'); + // Check that the line is properly set in the history structure + assert.strictEqual(r.history[0], 'problem with my syntax`\rlet lineWithMistake = `I have some'); assert.strictEqual(r.line, ''); + + r.input.run([{ name: 'up' }]); + // Check that the line is properly displayed + assert.strictEqual(r.line, 'let lineWithMistake = `I have some\nproblem with my syntax`'); @@ -150,7 +142,7 @@ const defaultHistoryPath = tmpdir.resolve('.node_repl_history'); const outputBuffer = []; // Test that the REPL preview is properly shown on multiline commands @@ -182,7 +174,7 @@ const defaultHistoryPath = 
tmpdir.resolve('.node_repl_history'); preview: true,
[]
[]
{ "additions": 21, "author": "puskin94", "deletions": 23, "html_url": "https://github.com/nodejs/node/pull/57874", "issue_id": 57874, "merged_at": "2025-04-16T15:01:40Z", "omission_probability": 0.1, "pr_number": 57874, "repo": "nodejs/node", "title": "repl: fix multiline history editing string order", "total_changes": 44 }
875
diff --git a/lib/internal/streams/end-of-stream.js b/lib/internal/streams/end-of-stream.js index c44f99e993ad5b..0db2d463b4c6d6 100644 --- a/lib/internal/streams/end-of-stream.js +++ b/lib/internal/streams/end-of-stream.js @@ -43,6 +43,9 @@ const { willEmitClose: _willEmitClose, kIsClosedPromise, } = require('internal/streams/utils'); + +// Lazy load +let AsyncLocalStorage; let addAbortListener; function isRequest(stream) { @@ -63,7 +66,8 @@ function eos(stream, options, callback) { validateFunction(callback, 'callback'); validateAbortSignal(options.signal, 'options.signal'); - callback = once(callback); + AsyncLocalStorage ??= require('async_hooks').AsyncLocalStorage; + callback = once(AsyncLocalStorage.bind(callback)); if (isReadableStream(stream) || isWritableStream(stream)) { return eosWeb(stream, options, callback); diff --git a/test/async-hooks/test-async-local-storage-stream-finished.js b/test/async-hooks/test-async-local-storage-stream-finished.js new file mode 100644 index 00000000000000..16162b2043abc2 --- /dev/null +++ b/test/async-hooks/test-async-local-storage-stream-finished.js @@ -0,0 +1,20 @@ +'use strict'; + +const common = require('../common'); +const { Readable, finished } = require('stream'); +const { AsyncLocalStorage } = require('async_hooks'); +const { strictEqual } = require('assert'); + +// This test verifies that AsyncLocalStorage context is maintained +// when using stream.finished() + +const readable = new Readable(); +const als = new AsyncLocalStorage(); + +als.run(321, () => { + finished(readable, common.mustCall(() => { + strictEqual(als.getStore(), 321); + })); +}); + +readable.destroy();
diff --git a/lib/internal/streams/end-of-stream.js b/lib/internal/streams/end-of-stream.js index c44f99e993ad5b..0db2d463b4c6d6 100644 --- a/lib/internal/streams/end-of-stream.js +++ b/lib/internal/streams/end-of-stream.js @@ -43,6 +43,9 @@ const { willEmitClose: _willEmitClose, kIsClosedPromise, } = require('internal/streams/utils'); +// Lazy load +let AsyncLocalStorage; let addAbortListener; function isRequest(stream) { @@ -63,7 +66,8 @@ function eos(stream, options, callback) { validateFunction(callback, 'callback'); validateAbortSignal(options.signal, 'options.signal'); - callback = once(callback); + AsyncLocalStorage ??= require('async_hooks').AsyncLocalStorage; + callback = once(AsyncLocalStorage.bind(callback)); if (isReadableStream(stream) || isWritableStream(stream)) { return eosWeb(stream, options, callback); diff --git a/test/async-hooks/test-async-local-storage-stream-finished.js b/test/async-hooks/test-async-local-storage-stream-finished.js new file mode 100644 index 00000000000000..16162b2043abc2 --- /dev/null +++ b/test/async-hooks/test-async-local-storage-stream-finished.js @@ -0,0 +1,20 @@ +'use strict'; +const common = require('../common'); +const { Readable, finished } = require('stream'); +const { AsyncLocalStorage } = require('async_hooks'); +const { strictEqual } = require('assert'); +// This test verifies that AsyncLocalStorage context is maintained +// when using stream.finished() +const als = new AsyncLocalStorage(); +als.run(321, () => { + finished(readable, common.mustCall(() => { + strictEqual(als.getStore(), 321); + })); +}); +readable.destroy();
[ "+const readable = new Readable();" ]
[ 40 ]
{ "additions": 25, "author": "gurgunday", "deletions": 1, "html_url": "https://github.com/nodejs/node/pull/57865", "issue_id": 57865, "merged_at": "2025-04-16T09:27:38Z", "omission_probability": 0.1, "pr_number": 57865, "repo": "nodejs/node", "title": "stream: preserve asynclocalstorage context in finished()", "total_changes": 26 }
876
diff --git a/lib/internal/webidl.js b/lib/internal/webidl.js index 351c1398c6d49c..071e8b9967e03a 100644 --- a/lib/internal/webidl.js +++ b/lib/internal/webidl.js @@ -2,6 +2,7 @@ const { ArrayPrototypePush, + ArrayPrototypeToSorted, MathAbs, MathMax, MathMin, @@ -29,6 +30,15 @@ const { kEmptyObject } = require('internal/util'); const converters = { __proto__: null }; +const UNDEFINED = 1; +const BOOLEAN = 2; +const STRING = 3; +const SYMBOL = 4; +const NUMBER = 5; +const BIGINT = 6; +const NULL = 7; +const OBJECT = 8; + /** * @see https://webidl.spec.whatwg.org/#es-any * @param {any} V @@ -39,7 +49,7 @@ converters.any = (V) => { }; converters.object = (V, opts = kEmptyObject) => { - if (type(V) !== 'Object') { + if (type(V) !== OBJECT) { throw makeException( 'is not an object', kEmptyObject, @@ -236,37 +246,98 @@ function createEnumConverter(name, values) { // https://tc39.es/ecma262/#sec-ecmascript-data-types-and-values function type(V) { - if (V === null) - return 'Null'; - switch (typeof V) { case 'undefined': - return 'Undefined'; + return UNDEFINED; case 'boolean': - return 'Boolean'; + return BOOLEAN; case 'number': - return 'Number'; + return NUMBER; case 'string': - return 'String'; + return STRING; case 'symbol': - return 'Symbol'; + return SYMBOL; case 'bigint': - return 'BigInt'; + return BIGINT; case 'object': // Fall through case 'function': // Fall through default: + if (V === null) { + return NULL; + } // Per ES spec, typeof returns an implementation-defined value that is not // any of the existing ones for uncallable non-standard exotic objects. // Yet Type() which the Web IDL spec depends on returns Object for such // cases. So treat the default case as an object. - return 'Object'; + return OBJECT; } } +// https://webidl.spec.whatwg.org/#js-dictionary +function createDictionaryConverter(members) { + // The spec requires us to operate the members of a dictionary in + // lexicographical order. 
We are doing this in the outer scope to + // reduce the overhead that could happen in the returned function. + const sortedMembers = ArrayPrototypeToSorted(members, (a, b) => { + if (a.key === b.key) { + return 0; + } + return a.key < b.key ? -1 : 1; + }); + + return function( + V, + opts = kEmptyObject, + ) { + if (V != null && type(V) !== OBJECT) { + throw makeException( + 'cannot be converted to a dictionary', + opts, + ); + } + + const idlDict = { __proto__: null }; + for (let i = 0; i < sortedMembers.length; i++) { + const member = sortedMembers[i]; + const key = member.key; + let jsMemberValue; + if (V == null) { + jsMemberValue = undefined; + } else { + jsMemberValue = V[key]; + } + + if (jsMemberValue !== undefined) { + const memberContext = opts.context ? `${key} in ${opts.context}` : `${key}`; + const converter = member.converter; + const idlMemberValue = converter( + jsMemberValue, + { + __proto__: null, + prefix: opts.prefix, + context: memberContext, + }, + ); + idlDict[key] = idlMemberValue; + } else if (typeof member.defaultValue === 'function') { + const idlMemberValue = member.defaultValue(); + idlDict[key] = idlMemberValue; + } else if (member.required) { + throw makeException( + `cannot be converted because of the missing '${key}'`, + opts, + ); + } + } + + return idlDict; + }; +} + // https://webidl.spec.whatwg.org/#es-sequence function createSequenceConverter(converter) { return function(V, opts = kEmptyObject) { - if (type(V) !== 'Object') { + if (type(V) !== OBJECT) { throw makeException( 'can not be converted to sequence.', opts); @@ -318,6 +389,7 @@ module.exports = { createEnumConverter, createInterfaceConverter, createSequenceConverter, + createDictionaryConverter, evenRound, makeException, }; diff --git a/lib/internal/worker/js_transferable.js b/lib/internal/worker/js_transferable.js index 58e377b87a9d11..592d43d2152e0a 100644 --- a/lib/internal/worker/js_transferable.js +++ b/lib/internal/worker/js_transferable.js @@ -5,7 +5,6 @@ const 
{ } = primordials; const { codes: { - ERR_INVALID_ARG_TYPE, ERR_MISSING_ARGS, }, } = require('internal/errors'); @@ -98,29 +97,31 @@ function markTransferMode(obj, cloneable = false, transferable = false) { obj[transfer_mode_private_symbol] = mode; } + +webidl.converters.StructuredSerializeOptions = webidl + .createDictionaryConverter( + [ + { + key: 'transfer', + converter: webidl.converters['sequence<object>'], + defaultValue: () => [], + }, + ], + ); + function structuredClone(value, options) { if (arguments.length === 0) { throw new ERR_MISSING_ARGS('The value argument must be specified'); } - // TODO(jazelly): implement generic webidl dictionary converter - const prefix = 'Options'; - const optionsType = webidl.type(options); - if (optionsType !== 'Undefined' && optionsType !== 'Null' && optionsType !== 'Object') { - throw new ERR_INVALID_ARG_TYPE( - prefix, - ['object', 'null', 'undefined'], - options, - ); - } - const key = 'transfer'; - const idlOptions = { __proto__: null, [key]: [] }; - if (options != null && key in options && options[key] !== undefined) { - idlOptions[key] = webidl.converters['sequence<object>'](options[key], { + const idlOptions = webidl.converters.StructuredSerializeOptions( + options, + { __proto__: null, - context: 'Transfer', - }); - } + prefix: "Failed to execute 'structuredClone'", + context: 'Options', + }, + ); const serializedData = nativeStructuredClone(value, idlOptions); return serializedData; diff --git a/test/parallel/test-structuredClone-global.js b/test/parallel/test-structuredClone-global.js index ef6ddc56a73cca..e6b63c382b39b1 100644 --- a/test/parallel/test-structuredClone-global.js +++ b/test/parallel/test-structuredClone-global.js @@ -3,12 +3,23 @@ require('../common'); const assert = require('assert'); +const prefix = "Failed to execute 'structuredClone'"; +const key = 'transfer'; +const context = 'Options'; +const memberConverterError = `${prefix}: ${key} in ${context} can not be converted to sequence.`; +const 
dictionaryConverterError = `${prefix}: ${context} cannot be converted to a dictionary`; + assert.throws(() => structuredClone(), { code: 'ERR_MISSING_ARGS' }); -assert.throws(() => structuredClone(undefined, ''), { code: 'ERR_INVALID_ARG_TYPE' }); -assert.throws(() => structuredClone(undefined, 1), { code: 'ERR_INVALID_ARG_TYPE' }); -assert.throws(() => structuredClone(undefined, { transfer: 1 }), { code: 'ERR_INVALID_ARG_TYPE' }); -assert.throws(() => structuredClone(undefined, { transfer: '' }), { code: 'ERR_INVALID_ARG_TYPE' }); -assert.throws(() => structuredClone(undefined, { transfer: null }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => structuredClone(undefined, ''), + { code: 'ERR_INVALID_ARG_TYPE', message: dictionaryConverterError }); +assert.throws(() => structuredClone(undefined, 1), + { code: 'ERR_INVALID_ARG_TYPE', message: dictionaryConverterError }); +assert.throws(() => structuredClone(undefined, { transfer: 1 }), + { code: 'ERR_INVALID_ARG_TYPE', message: memberConverterError }); +assert.throws(() => structuredClone(undefined, { transfer: '' }), + { code: 'ERR_INVALID_ARG_TYPE', message: memberConverterError }); +assert.throws(() => structuredClone(undefined, { transfer: null }), + { code: 'ERR_INVALID_ARG_TYPE', message: memberConverterError }); // Options can be null or undefined. assert.strictEqual(structuredClone(undefined), undefined);
diff --git a/lib/internal/webidl.js b/lib/internal/webidl.js index 351c1398c6d49c..071e8b9967e03a 100644 --- a/lib/internal/webidl.js +++ b/lib/internal/webidl.js @@ -2,6 +2,7 @@ ArrayPrototypePush, + ArrayPrototypeToSorted, MathAbs, MathMax, MathMin, @@ -29,6 +30,15 @@ const { kEmptyObject } = require('internal/util'); const converters = { __proto__: null }; +const UNDEFINED = 1; +const BOOLEAN = 2; +const STRING = 3; +const SYMBOL = 4; +const NUMBER = 5; +const BIGINT = 6; +const NULL = 7; +const OBJECT = 8; /** * @see https://webidl.spec.whatwg.org/#es-any * @param {any} V @@ -39,7 +49,7 @@ converters.any = (V) => { converters.object = (V, opts = kEmptyObject) => { - if (type(V) !== 'Object') { + if (type(V) !== OBJECT) { throw makeException( 'is not an object', kEmptyObject, @@ -236,37 +246,98 @@ function createEnumConverter(name, values) { // https://tc39.es/ecma262/#sec-ecmascript-data-types-and-values function type(V) { - if (V === null) - return 'Null'; switch (typeof V) { case 'undefined': - return 'Undefined'; + return UNDEFINED; case 'boolean': - return 'Boolean'; + return BOOLEAN; case 'number': - return 'Number'; + return NUMBER; case 'string': - return 'String'; + return STRING; case 'symbol': - return 'Symbol'; + return SYMBOL; case 'bigint': - return 'BigInt'; + return BIGINT; case 'object': // Fall through case 'function': // Fall through default: + if (V === null) { + return NULL; // Per ES spec, typeof returns an implementation-defined value that is not // any of the existing ones for uncallable non-standard exotic objects. // Yet Type() which the Web IDL spec depends on returns Object for such // cases. So treat the default case as an object. - return 'Object'; + return OBJECT; +function createDictionaryConverter(members) { + // The spec requires us to operate the members of a dictionary in + // lexicographical order. We are doing this in the outer scope to + // reduce the overhead that could happen in the returned function. 
+ const sortedMembers = ArrayPrototypeToSorted(members, (a, b) => { + if (a.key === b.key) { + return a.key < b.key ? -1 : 1; + }); + return function( + V, + opts = kEmptyObject, + ) { + if (V != null && type(V) !== OBJECT) { + throw makeException( + 'cannot be converted to a dictionary', + opts, + ); + const idlDict = { __proto__: null }; + for (let i = 0; i < sortedMembers.length; i++) { + const member = sortedMembers[i]; + const key = member.key; + let jsMemberValue; + if (V == null) { + } else { + jsMemberValue = V[key]; + if (jsMemberValue !== undefined) { + jsMemberValue, + { + __proto__: null, + prefix: opts.prefix, + context: memberContext, + }, + } else if (typeof member.defaultValue === 'function') { + const idlMemberValue = member.defaultValue(); + } else if (member.required) { + throw makeException( + `cannot be converted because of the missing '${key}'`, + }; +} // https://webidl.spec.whatwg.org/#es-sequence function createSequenceConverter(converter) { return function(V, opts = kEmptyObject) { + if (type(V) !== OBJECT) { throw makeException( 'can not be converted to sequence.', opts); @@ -318,6 +389,7 @@ module.exports = { createEnumConverter, createInterfaceConverter, createSequenceConverter, + createDictionaryConverter, evenRound, makeException, diff --git a/lib/internal/worker/js_transferable.js b/lib/internal/worker/js_transferable.js index 58e377b87a9d11..592d43d2152e0a 100644 --- a/lib/internal/worker/js_transferable.js +++ b/lib/internal/worker/js_transferable.js @@ -5,7 +5,6 @@ const { } = primordials; codes: { - ERR_INVALID_ARG_TYPE, ERR_MISSING_ARGS, }, } = require('internal/errors'); @@ -98,29 +97,31 @@ function markTransferMode(obj, cloneable = false, transferable = false) { obj[transfer_mode_private_symbol] = mode; +webidl.converters.StructuredSerializeOptions = webidl + .createDictionaryConverter( + [ + { + key: 'transfer', + defaultValue: () => [], + }, + ], function structuredClone(value, options) { if (arguments.length === 0) { throw 
new ERR_MISSING_ARGS('The value argument must be specified'); - // TODO(jazelly): implement generic webidl dictionary converter - const prefix = 'Options'; - const optionsType = webidl.type(options); - if (optionsType !== 'Undefined' && optionsType !== 'Null' && optionsType !== 'Object') { - ['object', 'null', 'undefined'], - options, - ); - const key = 'transfer'; - const idlOptions = { __proto__: null, [key]: [] }; - if (options != null && key in options && options[key] !== undefined) { - idlOptions[key] = webidl.converters['sequence<object>'](options[key], { + const idlOptions = webidl.converters.StructuredSerializeOptions( + options, + { __proto__: null, - context: 'Transfer', - }); + prefix: "Failed to execute 'structuredClone'", + context: 'Options', + }, const serializedData = nativeStructuredClone(value, idlOptions); return serializedData; diff --git a/test/parallel/test-structuredClone-global.js b/test/parallel/test-structuredClone-global.js index ef6ddc56a73cca..e6b63c382b39b1 100644 --- a/test/parallel/test-structuredClone-global.js +++ b/test/parallel/test-structuredClone-global.js @@ -3,12 +3,23 @@ require('../common'); const assert = require('assert'); +const prefix = "Failed to execute 'structuredClone'"; +const key = 'transfer'; +const context = 'Options'; +const memberConverterError = `${prefix}: ${key} in ${context} can not be converted to sequence.`; +const dictionaryConverterError = `${prefix}: ${context} cannot be converted to a dictionary`; assert.throws(() => structuredClone(), { code: 'ERR_MISSING_ARGS' }); -assert.throws(() => structuredClone(undefined, ''), { code: 'ERR_INVALID_ARG_TYPE' }); -assert.throws(() => structuredClone(undefined, { transfer: 1 }), { code: 'ERR_INVALID_ARG_TYPE' }); -assert.throws(() => structuredClone(undefined, { transfer: '' }), { code: 'ERR_INVALID_ARG_TYPE' }); -assert.throws(() => structuredClone(undefined, { transfer: null }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => structuredClone(undefined, 
1), +assert.throws(() => structuredClone(undefined, { transfer: 1 }), +assert.throws(() => structuredClone(undefined, { transfer: '' }), +assert.throws(() => structuredClone(undefined, { transfer: null }), // Options can be null or undefined. assert.strictEqual(structuredClone(undefined), undefined);
[ "-", "+// https://webidl.spec.whatwg.org/#js-dictionary", "+ return 0;", "+ jsMemberValue = undefined;", "+ const memberContext = opts.context ? `${key} in ${opts.context}` : `${key}`;", "+ const converter = member.converter;", "+ const idlMemberValue = converter(", "+ opts,", "+ return idlDict;", "- if (type(V) !== 'Object') {", "+ converter: webidl.converters['sequence<object>'],", "- throw new ERR_INVALID_ARG_TYPE(", "- prefix,", "-assert.throws(() => structuredClone(undefined, 1), { code: 'ERR_INVALID_ARG_TYPE' });", "+assert.throws(() => structuredClone(undefined, '')," ]
[ 43, 78, 85, 107, 113, 114, 115, 130, 135, 142, 177, 192, 193, 232, 236 ]
{ "additions": 119, "author": "jazelly", "deletions": 35, "html_url": "https://github.com/nodejs/node/pull/55489", "issue_id": 55489, "merged_at": "2024-11-05T22:55:45Z", "omission_probability": 0.1, "pr_number": 55489, "repo": "nodejs/node", "title": "lib: implement webidl dictionary converter and use it in structuredClone", "total_changes": 154 }
877
diff --git a/lib/internal/bootstrap/web/exposed-window-or-worker.js b/lib/internal/bootstrap/web/exposed-window-or-worker.js index d5d844b229003d..d7d9b60b1c8e94 100644 --- a/lib/internal/bootstrap/web/exposed-window-or-worker.js +++ b/lib/internal/bootstrap/web/exposed-window-or-worker.js @@ -38,8 +38,11 @@ const { } = require('internal/process/task_queues'); defineOperation(globalThis, 'queueMicrotask', queueMicrotask); -const { structuredClone } = internalBinding('messaging'); -defineOperation(globalThis, 'structuredClone', structuredClone); +defineLazyProperties( + globalThis, + 'internal/worker/js_transferable', + ['structuredClone'], +); defineLazyProperties(globalThis, 'buffer', ['atob', 'btoa']); // https://html.spec.whatwg.org/multipage/web-messaging.html#broadcasting-to-other-browsing-contexts diff --git a/lib/internal/perf/usertiming.js b/lib/internal/perf/usertiming.js index dfbbcbaf36c538..b2783799067dde 100644 --- a/lib/internal/perf/usertiming.js +++ b/lib/internal/perf/usertiming.js @@ -31,7 +31,7 @@ const { }, } = require('internal/errors'); -const { structuredClone } = internalBinding('messaging'); +const { structuredClone } = require('internal/worker/js_transferable'); const { lazyDOMException, kEnumerableProperty, diff --git a/lib/internal/webidl.js b/lib/internal/webidl.js index 1278f4cac7fb81..351c1398c6d49c 100644 --- a/lib/internal/webidl.js +++ b/lib/internal/webidl.js @@ -38,6 +38,16 @@ converters.any = (V) => { return V; }; +converters.object = (V, opts = kEmptyObject) => { + if (type(V) !== 'Object') { + throw makeException( + 'is not an object', + kEmptyObject, + ); + } + return V; +}; + // https://webidl.spec.whatwg.org/#abstract-opdef-integerpart const integerPart = MathTrunc; @@ -189,6 +199,8 @@ converters.DOMString = function DOMString(V) { return String(V); }; +converters['sequence<object>'] = createSequenceConverter(converters.object); + function codedTypeError(message, errorProperties = kEmptyObject) { // eslint-disable-next-line 
no-restricted-syntax const err = new TypeError(message); diff --git a/lib/internal/webstreams/readablestream.js b/lib/internal/webstreams/readablestream.js index 10179321c10366..5d9379287e9605 100644 --- a/lib/internal/webstreams/readablestream.js +++ b/lib/internal/webstreams/readablestream.js @@ -74,6 +74,7 @@ const { kTransfer, kTransferList, markTransferMode, + structuredClone, } = require('internal/worker/js_transferable'); const { @@ -88,8 +89,6 @@ const { kControllerErrorFunction, } = require('internal/streams/utils'); -const { structuredClone } = internalBinding('messaging'); - const { ArrayBufferViewGetBuffer, ArrayBufferViewGetByteLength, diff --git a/lib/internal/worker/js_transferable.js b/lib/internal/worker/js_transferable.js index 083702149f29d9..58e377b87a9d11 100644 --- a/lib/internal/worker/js_transferable.js +++ b/lib/internal/worker/js_transferable.js @@ -3,6 +3,13 @@ const { Error, StringPrototypeSplit, } = primordials; +const { + codes: { + ERR_INVALID_ARG_TYPE, + ERR_MISSING_ARGS, + }, +} = require('internal/errors'); +const webidl = require('internal/webidl'); const { messaging_deserialize_symbol, messaging_transfer_symbol, @@ -11,6 +18,7 @@ const { } = internalBinding('symbols'); const { setDeserializerCreateObjectFunction, + structuredClone: nativeStructuredClone, } = internalBinding('messaging'); const { privateSymbols: { @@ -90,9 +98,38 @@ function markTransferMode(obj, cloneable = false, transferable = false) { obj[transfer_mode_private_symbol] = mode; } +function structuredClone(value, options) { + if (arguments.length === 0) { + throw new ERR_MISSING_ARGS('The value argument must be specified'); + } + + // TODO(jazelly): implement generic webidl dictionary converter + const prefix = 'Options'; + const optionsType = webidl.type(options); + if (optionsType !== 'Undefined' && optionsType !== 'Null' && optionsType !== 'Object') { + throw new ERR_INVALID_ARG_TYPE( + prefix, + ['object', 'null', 'undefined'], + options, + ); + } + const key 
= 'transfer'; + const idlOptions = { __proto__: null, [key]: [] }; + if (options != null && key in options && options[key] !== undefined) { + idlOptions[key] = webidl.converters['sequence<object>'](options[key], { + __proto__: null, + context: 'Transfer', + }); + } + + const serializedData = nativeStructuredClone(value, idlOptions); + return serializedData; +} + module.exports = { markTransferMode, setup, + structuredClone, kClone: messaging_clone_symbol, kDeserialize: messaging_deserialize_symbol, kTransfer: messaging_transfer_symbol, diff --git a/src/node_messaging.cc b/src/node_messaging.cc index 9d9e27c3624e6b..a1c22cf5005121 100644 --- a/src/node_messaging.cc +++ b/src/node_messaging.cc @@ -1578,28 +1578,21 @@ static void StructuredClone(const FunctionCallbackInfo<Value>& args) { Realm* realm = Realm::GetCurrent(context); Environment* env = realm->env(); - if (args.Length() == 0) { - return THROW_ERR_MISSING_ARGS(env, "The value argument must be specified"); - } - Local<Value> value = args[0]; TransferList transfer_list; - if (!args[1]->IsNullOrUndefined()) { - if (!args[1]->IsObject()) { - return THROW_ERR_INVALID_ARG_TYPE( - env, "The options argument must be either an object or undefined"); - } - Local<Object> options = args[1].As<Object>(); - Local<Value> transfer_list_v; - if (!options->Get(context, env->transfer_string()) - .ToLocal(&transfer_list_v)) { - return; - } + Local<Object> options = args[1].As<Object>(); + Local<Value> transfer_list_v; + if (!options->Get(context, env->transfer_string()) + .ToLocal(&transfer_list_v)) { + return; + } - // TODO(joyeecheung): implement this in JS land to avoid the C++ -> JS - // cost to convert a sequence into an array. 
- if (!GetTransferList(env, context, transfer_list_v, &transfer_list)) { + Local<Array> arr = transfer_list_v.As<Array>(); + size_t length = arr->Length(); + transfer_list.AllocateSufficientStorage(length); + for (size_t i = 0; i < length; i++) { + if (!arr->Get(context, i).ToLocal(&transfer_list[i])) { return; } } diff --git a/test/parallel/test-structuredClone-global.js b/test/parallel/test-structuredClone-global.js index 34b6abe32d3fcf..ef6ddc56a73cca 100644 --- a/test/parallel/test-structuredClone-global.js +++ b/test/parallel/test-structuredClone-global.js @@ -8,12 +8,12 @@ assert.throws(() => structuredClone(undefined, ''), { code: 'ERR_INVALID_ARG_TYP assert.throws(() => structuredClone(undefined, 1), { code: 'ERR_INVALID_ARG_TYPE' }); assert.throws(() => structuredClone(undefined, { transfer: 1 }), { code: 'ERR_INVALID_ARG_TYPE' }); assert.throws(() => structuredClone(undefined, { transfer: '' }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => structuredClone(undefined, { transfer: null }), { code: 'ERR_INVALID_ARG_TYPE' }); // Options can be null or undefined. assert.strictEqual(structuredClone(undefined), undefined); assert.strictEqual(structuredClone(undefined, null), undefined); // Transfer can be null or undefined. 
-assert.strictEqual(structuredClone(undefined, { transfer: null }), undefined); assert.strictEqual(structuredClone(undefined, { }), undefined); // Transferables or its subclasses should be received with its closest transferable superclass @@ -43,6 +43,27 @@ for (const Transferrable of [File, Blob]) { assert.ok(extendedTransfer instanceof Transferrable); } +// Transfer can be iterable +{ + const value = { + a: new ReadableStream(), + b: new WritableStream(), + }; + const cloned = structuredClone(value, { + transfer: { + *[Symbol.iterator]() { + for (const key in value) { + yield value[key]; + } + } + } + }); + for (const key in value) { + assert.ok(value[key].locked); + assert.ok(!cloned[key].locked); + } +} + { // See: https://github.com/nodejs/node/issues/49940 const cloned = structuredClone({}, {
diff --git a/lib/internal/bootstrap/web/exposed-window-or-worker.js b/lib/internal/bootstrap/web/exposed-window-or-worker.js index d5d844b229003d..d7d9b60b1c8e94 100644 --- a/lib/internal/bootstrap/web/exposed-window-or-worker.js +++ b/lib/internal/bootstrap/web/exposed-window-or-worker.js @@ -38,8 +38,11 @@ const { } = require('internal/process/task_queues'); defineOperation(globalThis, 'queueMicrotask', queueMicrotask); -defineOperation(globalThis, 'structuredClone', structuredClone); +defineLazyProperties( + globalThis, + 'internal/worker/js_transferable', + ['structuredClone'], +); defineLazyProperties(globalThis, 'buffer', ['atob', 'btoa']); // https://html.spec.whatwg.org/multipage/web-messaging.html#broadcasting-to-other-browsing-contexts diff --git a/lib/internal/perf/usertiming.js b/lib/internal/perf/usertiming.js index dfbbcbaf36c538..b2783799067dde 100644 --- a/lib/internal/perf/usertiming.js +++ b/lib/internal/perf/usertiming.js @@ -31,7 +31,7 @@ const { }, } = require('internal/errors'); +const { structuredClone } = require('internal/worker/js_transferable'); lazyDOMException, kEnumerableProperty, diff --git a/lib/internal/webidl.js b/lib/internal/webidl.js index 1278f4cac7fb81..351c1398c6d49c 100644 --- a/lib/internal/webidl.js +++ b/lib/internal/webidl.js @@ -38,6 +38,16 @@ converters.any = (V) => { return V; +converters.object = (V, opts = kEmptyObject) => { + if (type(V) !== 'Object') { + throw makeException( + 'is not an object', + kEmptyObject, + return V; +}; // https://webidl.spec.whatwg.org/#abstract-opdef-integerpart const integerPart = MathTrunc; @@ -189,6 +199,8 @@ converters.DOMString = function DOMString(V) { return String(V); +converters['sequence<object>'] = createSequenceConverter(converters.object); function codedTypeError(message, errorProperties = kEmptyObject) { // eslint-disable-next-line no-restricted-syntax const err = new TypeError(message); diff --git a/lib/internal/webstreams/readablestream.js 
b/lib/internal/webstreams/readablestream.js index 10179321c10366..5d9379287e9605 100644 --- a/lib/internal/webstreams/readablestream.js +++ b/lib/internal/webstreams/readablestream.js @@ -74,6 +74,7 @@ const { kTransfer, kTransferList, } = require('internal/worker/js_transferable'); @@ -88,8 +89,6 @@ const { kControllerErrorFunction, } = require('internal/streams/utils'); ArrayBufferViewGetBuffer, ArrayBufferViewGetByteLength, diff --git a/lib/internal/worker/js_transferable.js b/lib/internal/worker/js_transferable.js index 083702149f29d9..58e377b87a9d11 100644 --- a/lib/internal/worker/js_transferable.js +++ b/lib/internal/worker/js_transferable.js @@ -3,6 +3,13 @@ const { Error, StringPrototypeSplit, } = primordials; + codes: { + ERR_INVALID_ARG_TYPE, + ERR_MISSING_ARGS, + }, +} = require('internal/errors'); +const webidl = require('internal/webidl'); messaging_deserialize_symbol, messaging_transfer_symbol, @@ -11,6 +18,7 @@ const { } = internalBinding('symbols'); setDeserializerCreateObjectFunction, + structuredClone: nativeStructuredClone, } = internalBinding('messaging'); privateSymbols: { @@ -90,9 +98,38 @@ function markTransferMode(obj, cloneable = false, transferable = false) { obj[transfer_mode_private_symbol] = mode; +function structuredClone(value, options) { + if (arguments.length === 0) { + throw new ERR_MISSING_ARGS('The value argument must be specified'); + // TODO(jazelly): implement generic webidl dictionary converter + const prefix = 'Options'; + const optionsType = webidl.type(options); + throw new ERR_INVALID_ARG_TYPE( + prefix, + ['object', 'null', 'undefined'], + options, + const key = 'transfer'; + const idlOptions = { __proto__: null, [key]: [] }; + if (options != null && key in options && options[key] !== undefined) { + idlOptions[key] = webidl.converters['sequence<object>'](options[key], { + __proto__: null, + context: 'Transfer', + }); + const serializedData = nativeStructuredClone(value, idlOptions); + return serializedData; 
module.exports = { setup, kClone: messaging_clone_symbol, kDeserialize: messaging_deserialize_symbol, kTransfer: messaging_transfer_symbol, diff --git a/src/node_messaging.cc b/src/node_messaging.cc index 9d9e27c3624e6b..a1c22cf5005121 100644 --- a/src/node_messaging.cc +++ b/src/node_messaging.cc @@ -1578,28 +1578,21 @@ static void StructuredClone(const FunctionCallbackInfo<Value>& args) { Realm* realm = Realm::GetCurrent(context); Environment* env = realm->env(); - if (args.Length() == 0) { - return THROW_ERR_MISSING_ARGS(env, "The value argument must be specified"); - } Local<Value> value = args[0]; TransferList transfer_list; - if (!args[1]->IsNullOrUndefined()) { - if (!args[1]->IsObject()) { - return THROW_ERR_INVALID_ARG_TYPE( - env, "The options argument must be either an object or undefined"); - Local<Object> options = args[1].As<Object>(); - Local<Value> transfer_list_v; - if (!options->Get(context, env->transfer_string()) - .ToLocal(&transfer_list_v)) { - return; + Local<Object> options = args[1].As<Object>(); + Local<Value> transfer_list_v; + if (!options->Get(context, env->transfer_string()) + .ToLocal(&transfer_list_v)) { + return; - // TODO(joyeecheung): implement this in JS land to avoid the C++ -> JS - // cost to convert a sequence into an array. 
- if (!GetTransferList(env, context, transfer_list_v, &transfer_list)) { + Local<Array> arr = transfer_list_v.As<Array>(); + for (size_t i = 0; i < length; i++) { + if (!arr->Get(context, i).ToLocal(&transfer_list[i])) { return; } } diff --git a/test/parallel/test-structuredClone-global.js b/test/parallel/test-structuredClone-global.js index 34b6abe32d3fcf..ef6ddc56a73cca 100644 --- a/test/parallel/test-structuredClone-global.js +++ b/test/parallel/test-structuredClone-global.js @@ -8,12 +8,12 @@ assert.throws(() => structuredClone(undefined, ''), { code: 'ERR_INVALID_ARG_TYP assert.throws(() => structuredClone(undefined, 1), { code: 'ERR_INVALID_ARG_TYPE' }); assert.throws(() => structuredClone(undefined, { transfer: 1 }), { code: 'ERR_INVALID_ARG_TYPE' }); assert.throws(() => structuredClone(undefined, { transfer: '' }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => structuredClone(undefined, { transfer: null }), { code: 'ERR_INVALID_ARG_TYPE' }); // Options can be null or undefined. assert.strictEqual(structuredClone(undefined), undefined); assert.strictEqual(structuredClone(undefined, null), undefined); // Transfer can be null or undefined. -assert.strictEqual(structuredClone(undefined, { transfer: null }), undefined); assert.strictEqual(structuredClone(undefined, { }), undefined); // Transferables or its subclasses should be received with its closest transferable superclass @@ -43,6 +43,27 @@ for (const Transferrable of [File, Blob]) { assert.ok(extendedTransfer instanceof Transferrable); +// Transfer can be iterable +{ + const value = { + a: new ReadableStream(), + b: new WritableStream(), + }; + const cloned = structuredClone(value, { + transfer: { + *[Symbol.iterator]() { + yield value[key]; + } + } + } + assert.ok(value[key].locked); + assert.ok(!cloned[key].locked); { // See: https://github.com/nodejs/node/issues/49940 const cloned = structuredClone({}, {
[ "+const {", "+ if (optionsType !== 'Undefined' && optionsType !== 'Null' && optionsType !== 'Object') {", "+ size_t length = arr->Length();", "+ transfer_list.AllocateSufficientStorage(length);", "+ for (const key in value) {", "+ });", "+ for (const key in value) {" ]
[ 90, 120, 184, 185, 222, 227, 228 ]
{ "additions": 89, "author": "jazelly", "deletions": 24, "html_url": "https://github.com/nodejs/node/pull/55317", "issue_id": 55317, "merged_at": "2024-10-13T18:11:21Z", "omission_probability": 0.1, "pr_number": 55317, "repo": "nodejs/node", "title": "lib: convert transfer sequence to array in js", "total_changes": 113 }
878
diff --git a/src/memory_tracker-inl.h b/src/memory_tracker-inl.h index c99ff8607100ba..31ed6fad98ccc8 100644 --- a/src/memory_tracker-inl.h +++ b/src/memory_tracker-inl.h @@ -297,6 +297,27 @@ void MemoryTracker::TrackInlineField(const MemoryRetainer* retainer, CurrentNode()->size_ -= retainer->SelfSize(); } +template <typename T> +inline void MemoryTracker::TraitTrack(const T& retainer, + const char* edge_name) { + MemoryRetainerNode* n = + PushNode(MemoryRetainerTraits<T>::MemoryInfoName(retainer), + MemoryRetainerTraits<T>::SelfSize(retainer), + edge_name); + MemoryRetainerTraits<T>::MemoryInfo(this, retainer); + CHECK_EQ(CurrentNode(), n); + CHECK_NE(n->size_, 0); + PopNode(); +} + +template <typename T> +inline void MemoryTracker::TraitTrackInline(const T& retainer, + const char* edge_name) { + TraitTrack(retainer, edge_name); + CHECK(CurrentNode()); + CurrentNode()->size_ -= MemoryRetainerTraits<T>::SelfSize(retainer); +} + MemoryRetainerNode* MemoryTracker::CurrentNode() const { if (node_stack_.empty()) return nullptr; return node_stack_.top(); diff --git a/src/memory_tracker.h b/src/memory_tracker.h index cae4e5c7a663c1..4e0a2fbaaac4f6 100644 --- a/src/memory_tracker.h +++ b/src/memory_tracker.h @@ -138,6 +138,33 @@ class MemoryRetainer { } }; +/** + * MemoryRetainerTraits allows defining a custom memory info for a + * class that can not be modified to implement the MemoryRetainer interface. 
+ * + * Example: + * + * template <> + * struct MemoryRetainerTraits<ExampleRetainer> { + * static void MemoryInfo(MemoryTracker* tracker, + * const ExampleRetainer& value) { + * tracker->TrackField("another_retainer", value.another_retainer_); + * } + * static const char* MemoryInfoName(const ExampleRetainer& value) { + * return "ExampleRetainer"; + * } + * static size_t SelfSize(const ExampleRetainer& value) { + * return sizeof(value); + * } + * }; + * + * This creates the following graph: + * Node / ExampleRetainer + * |> another_retainer :: Node / AnotherRetainerClass + */ +template <typename T, typename = void> +struct MemoryRetainerTraits {}; + class MemoryTracker { public: // Used to specify node name and size explicitly @@ -254,6 +281,13 @@ class MemoryTracker { inline void TrackInlineField(const MemoryRetainer* retainer, const char* edge_name = nullptr); + // MemoryRetainerTraits implementation helpers. + template <typename T> + inline void TraitTrack(const T& retainer, const char* edge_name = nullptr); + template <typename T> + inline void TraitTrackInline(const T& retainer, + const char* edge_name = nullptr); + inline v8::EmbedderGraph* graph() { return graph_; } inline v8::Isolate* isolate() { return isolate_; } diff --git a/src/node_url_pattern.cc b/src/node_url_pattern.cc index 3d6340565d1e06..188d7df6a04166 100644 --- a/src/node_url_pattern.cc +++ b/src/node_url_pattern.cc @@ -8,6 +8,48 @@ #include "path.h" #include "util-inl.h" +namespace node { +using node::url_pattern::URLPatternRegexProvider; + +template <> +struct MemoryRetainerTraits<ada::url_pattern<URLPatternRegexProvider>> { + using Type = ada::url_pattern<URLPatternRegexProvider>; + static void MemoryInfo(MemoryTracker* tracker, const Type& value) { + tracker->TraitTrackInline(value.protocol_component, "protocol_component"); + tracker->TraitTrackInline(value.username_component, "username_component"); + tracker->TraitTrackInline(value.password_component, "password_component"); + 
tracker->TraitTrackInline(value.hostname_component, "hostname_component"); + tracker->TraitTrackInline(value.port_component, "port_component"); + tracker->TraitTrackInline(value.pathname_component, "pathname_component"); + tracker->TraitTrackInline(value.search_component, "search_component"); + tracker->TraitTrackInline(value.hash_component, "hash_component"); + } + + static const char* MemoryInfoName(const Type& value) { + return "ada::url_pattern"; + } + + static size_t SelfSize(const Type& value) { return sizeof(value); } +}; + +template <> +struct MemoryRetainerTraits< + ada::url_pattern_component<URLPatternRegexProvider>> { + using Type = ada::url_pattern_component<URLPatternRegexProvider>; + static void MemoryInfo(MemoryTracker* tracker, const Type& value) { + tracker->TrackField("pattern", value.pattern); + tracker->TrackField("group_name_list", value.group_name_list); + } + + static const char* MemoryInfoName(const Type& value) { + return "ada::url_pattern_component"; + } + + static size_t SelfSize(const Type& value) { return sizeof(value); } +}; + +} // namespace node + namespace node::url_pattern { using v8::Array; @@ -123,13 +165,7 @@ URLPattern::URLPattern(Environment* env, } void URLPattern::MemoryInfo(MemoryTracker* tracker) const { - tracker->TrackFieldWithSize("protocol", url_pattern_.get_protocol().size()); - tracker->TrackFieldWithSize("username", url_pattern_.get_username().size()); - tracker->TrackFieldWithSize("password", url_pattern_.get_password().size()); - tracker->TrackFieldWithSize("hostname", url_pattern_.get_hostname().size()); - tracker->TrackFieldWithSize("pathname", url_pattern_.get_pathname().size()); - tracker->TrackFieldWithSize("search", url_pattern_.get_search().size()); - tracker->TrackFieldWithSize("hash", url_pattern_.get_hash().size()); + tracker->TraitTrackInline(url_pattern_, "url_pattern"); } void URLPattern::New(const FunctionCallbackInfo<Value>& args) { diff --git a/test/pummel/test-heapdump-urlpattern.js 
b/test/pummel/test-heapdump-urlpattern.js new file mode 100644 index 00000000000000..f903feae85b999 --- /dev/null +++ b/test/pummel/test-heapdump-urlpattern.js @@ -0,0 +1,25 @@ +// Flags: --expose-internals +'use strict'; +require('../common'); +const { validateSnapshotNodes } = require('../common/heap'); +const { URLPattern } = require('node:url'); + +validateSnapshotNodes('Node / URLPattern', []); +const urlPattern = new URLPattern('https://example.com/:id'); +validateSnapshotNodes('Node / URLPattern', [ + { + children: [ + { node_name: 'Node / ada::url_pattern', edge_name: 'url_pattern' }, + ], + }, +]); +validateSnapshotNodes('Node / ada::url_pattern', [ + { + children: [ + { node_name: 'Node / ada::url_pattern_component', edge_name: 'protocol_component' }, + ], + }, +]); + +// Use `urlPattern`. +console.log(urlPattern);
diff --git a/src/memory_tracker-inl.h b/src/memory_tracker-inl.h index c99ff8607100ba..31ed6fad98ccc8 100644 --- a/src/memory_tracker-inl.h +++ b/src/memory_tracker-inl.h @@ -297,6 +297,27 @@ void MemoryTracker::TrackInlineField(const MemoryRetainer* retainer, CurrentNode()->size_ -= retainer->SelfSize(); +inline void MemoryTracker::TraitTrack(const T& retainer, + const char* edge_name) { + MemoryRetainerNode* n = + PushNode(MemoryRetainerTraits<T>::MemoryInfoName(retainer), + MemoryRetainerTraits<T>::SelfSize(retainer), + CHECK_EQ(CurrentNode(), n); + CHECK_NE(n->size_, 0); + PopNode(); +inline void MemoryTracker::TraitTrackInline(const T& retainer, + const char* edge_name) { + TraitTrack(retainer, edge_name); + CHECK(CurrentNode()); MemoryRetainerNode* MemoryTracker::CurrentNode() const { if (node_stack_.empty()) return nullptr; return node_stack_.top(); diff --git a/src/memory_tracker.h b/src/memory_tracker.h index cae4e5c7a663c1..4e0a2fbaaac4f6 100644 --- a/src/memory_tracker.h +++ b/src/memory_tracker.h @@ -138,6 +138,33 @@ class MemoryRetainer { } }; +/** + * MemoryRetainerTraits allows defining a custom memory info for a + * class that can not be modified to implement the MemoryRetainer interface. 
+ * Example: + * template <> + * struct MemoryRetainerTraits<ExampleRetainer> { + * static void MemoryInfo(MemoryTracker* tracker, + * const ExampleRetainer& value) { + * tracker->TrackField("another_retainer", value.another_retainer_); + * static const char* MemoryInfoName(const ExampleRetainer& value) { + * return "ExampleRetainer"; + * return sizeof(value); + * }; + * This creates the following graph: + * Node / ExampleRetainer + * |> another_retainer :: Node / AnotherRetainerClass + */ +template <typename T, typename = void> class MemoryTracker { public: // Used to specify node name and size explicitly @@ -254,6 +281,13 @@ class MemoryTracker { inline void TrackInlineField(const MemoryRetainer* retainer, const char* edge_name = nullptr); + // MemoryRetainerTraits implementation helpers. + inline void TraitTrack(const T& retainer, const char* edge_name = nullptr); + inline void TraitTrackInline(const T& retainer, + const char* edge_name = nullptr); inline v8::EmbedderGraph* graph() { return graph_; } inline v8::Isolate* isolate() { return isolate_; } diff --git a/src/node_url_pattern.cc b/src/node_url_pattern.cc index 3d6340565d1e06..188d7df6a04166 100644 --- a/src/node_url_pattern.cc +++ b/src/node_url_pattern.cc @@ -8,6 +8,48 @@ #include "path.h" #include "util-inl.h" +namespace node { +using node::url_pattern::URLPatternRegexProvider; +struct MemoryRetainerTraits<ada::url_pattern<URLPatternRegexProvider>> { + using Type = ada::url_pattern<URLPatternRegexProvider>; + tracker->TraitTrackInline(value.protocol_component, "protocol_component"); + tracker->TraitTrackInline(value.username_component, "username_component"); + tracker->TraitTrackInline(value.password_component, "password_component"); + tracker->TraitTrackInline(value.hostname_component, "hostname_component"); + tracker->TraitTrackInline(value.port_component, "port_component"); + tracker->TraitTrackInline(value.pathname_component, "pathname_component"); + 
tracker->TraitTrackInline(value.search_component, "search_component"); + tracker->TraitTrackInline(value.hash_component, "hash_component"); + return "ada::url_pattern"; +struct MemoryRetainerTraits< + ada::url_pattern_component<URLPatternRegexProvider>> { + using Type = ada::url_pattern_component<URLPatternRegexProvider>; + tracker->TrackField("group_name_list", value.group_name_list); + return "ada::url_pattern_component"; +} // namespace node namespace node::url_pattern { using v8::Array; @@ -123,13 +165,7 @@ URLPattern::URLPattern(Environment* env, void URLPattern::MemoryInfo(MemoryTracker* tracker) const { - tracker->TrackFieldWithSize("protocol", url_pattern_.get_protocol().size()); - tracker->TrackFieldWithSize("username", url_pattern_.get_username().size()); - tracker->TrackFieldWithSize("password", url_pattern_.get_password().size()); - tracker->TrackFieldWithSize("hostname", url_pattern_.get_hostname().size()); - tracker->TrackFieldWithSize("pathname", url_pattern_.get_pathname().size()); - tracker->TrackFieldWithSize("hash", url_pattern_.get_hash().size()); + tracker->TraitTrackInline(url_pattern_, "url_pattern"); void URLPattern::New(const FunctionCallbackInfo<Value>& args) { diff --git a/test/pummel/test-heapdump-urlpattern.js b/test/pummel/test-heapdump-urlpattern.js new file mode 100644 index 00000000000000..f903feae85b999 --- /dev/null +++ b/test/pummel/test-heapdump-urlpattern.js @@ -0,0 +1,25 @@ +// Flags: --expose-internals +'use strict'; +require('../common'); +const { validateSnapshotNodes } = require('../common/heap'); +const { URLPattern } = require('node:url'); +validateSnapshotNodes('Node / URLPattern', []); +const urlPattern = new URLPattern('https://example.com/:id'); + { node_name: 'Node / ada::url_pattern', edge_name: 'url_pattern' }, +validateSnapshotNodes('Node / ada::url_pattern', [ + { node_name: 'Node / ada::url_pattern_component', edge_name: 'protocol_component' }, +// Use `urlPattern`. +console.log(urlPattern);
[ "+ edge_name);", "+ MemoryRetainerTraits<T>::MemoryInfo(this, retainer);", "+ CurrentNode()->size_ -= MemoryRetainerTraits<T>::SelfSize(retainer);", "+ * static size_t SelfSize(const ExampleRetainer& value) {", "+struct MemoryRetainerTraits {};", "+ tracker->TrackField(\"pattern\", value.pattern);", "- tracker->TrackFieldWithSize(\"search\", url_pattern_.get_search().size());", "+validateSnapshotNodes('Node / URLPattern', [" ]
[ 14, 15, 26, 55, 65, 121, 146, 166 ]
{ "additions": 123, "author": "legendecas", "deletions": 7, "html_url": "https://github.com/nodejs/node/pull/56881", "issue_id": 56881, "merged_at": "2025-02-04T17:07:12Z", "omission_probability": 0.1, "pr_number": 56881, "repo": "nodejs/node", "title": "src: add memory retainer traits for external types", "total_changes": 130 }
879
diff --git a/test/fixtures/wpt/README.md b/test/fixtures/wpt/README.md index 7b6b8c29ce3d12..0bd606e7223401 100644 --- a/test/fixtures/wpt/README.md +++ b/test/fixtures/wpt/README.md @@ -29,7 +29,7 @@ Last update: - resources: https://github.com/web-platform-tests/wpt/tree/1e140d63ec/resources - streams: https://github.com/web-platform-tests/wpt/tree/bc9dcbbf1a/streams - url: https://github.com/web-platform-tests/wpt/tree/a23788b77a/url -- urlpattern: https://github.com/web-platform-tests/wpt/tree/1b56d89a26/urlpattern +- urlpattern: https://github.com/web-platform-tests/wpt/tree/ef6d83d789/urlpattern - user-timing: https://github.com/web-platform-tests/wpt/tree/5ae85bf826/user-timing - wasm/jsapi: https://github.com/web-platform-tests/wpt/tree/cde25e7e3c/wasm/jsapi - wasm/webapi: https://github.com/web-platform-tests/wpt/tree/fd1b23eeaa/wasm/webapi diff --git a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json index 058079bb6d17ac..1d2ba25ff7d696 100644 --- a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json +++ b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json @@ -1121,6 +1121,63 @@ "hostname": { "input": "xn--caf-dma.com", "groups": {}} } }, + { + "pattern": ["http://\uD83D\uDEB2.com/"], + "inputs": ["http://\uD83D\uDEB2.com/"], + "exactly_empty_components": [ "port" ], + "expected_obj": { + "protocol": "http", + "hostname": "xn--h78h.com", + "pathname": "/" + }, + "expected_match": { + "protocol": { "input": "http", "groups": {}}, + "hostname": { "input": "xn--h78h.com", "groups": {}}, + "pathname": { "input": "/", "groups": {}} + } + }, + { + "pattern": ["http://\uD83D \uDEB2"], + "expected_obj": "error" + }, + { + "pattern": [{"hostname":"\uD83D \uDEB2"}], + "expected_obj": "error" + }, + { + "pattern": [{"pathname":"\uD83D \uDEB2"}], + "inputs": [], + "expected_obj": { + "pathname": "%EF%BF%BD%20%EF%BF%BD" + }, + "expected_match": null + }, + { + 
"pattern": [{"pathname":":\uD83D \uDEB2"}], + "expected_obj": "error" + }, + { + "pattern": [{"pathname":":a\uDB40\uDD00b"}], + "inputs": [], + "expected_obj": { + "pathname": ":a\uDB40\uDD00b" + }, + "expected_match": null + }, + { + "pattern": [{"pathname":"test/:a\uD801\uDC50b"}], + "inputs": [{"pathname":"test/foo"}], + "expected_obj": { + "pathname": "test/:a\uD801\uDC50b" + }, + "expected_match": { + "pathname": { "input": "test/foo", "groups": { "a\uD801\uDC50b": "foo" }} + } + }, + { + "pattern": [{"pathname":":\uD83D\uDEB2"}], + "expected_obj": "error" + }, { "pattern": [{ "port": "" }], "inputs": [{ "protocol": "http", "port": "80" }], diff --git a/test/fixtures/wpt/versions.json b/test/fixtures/wpt/versions.json index 5ac57c34db0ed5..14b9e1c3ce488a 100644 --- a/test/fixtures/wpt/versions.json +++ b/test/fixtures/wpt/versions.json @@ -76,7 +76,7 @@ "path": "url" }, "urlpattern": { - "commit": "1b56d89a261b86dedfd2854b53c1732e435f1f57", + "commit": "ef6d83d789483763207af8cedcbf1f3c1317b981", "path": "urlpattern" }, "user-timing": {
diff --git a/test/fixtures/wpt/README.md b/test/fixtures/wpt/README.md index 7b6b8c29ce3d12..0bd606e7223401 100644 --- a/test/fixtures/wpt/README.md +++ b/test/fixtures/wpt/README.md @@ -29,7 +29,7 @@ Last update: - resources: https://github.com/web-platform-tests/wpt/tree/1e140d63ec/resources - streams: https://github.com/web-platform-tests/wpt/tree/bc9dcbbf1a/streams - url: https://github.com/web-platform-tests/wpt/tree/a23788b77a/url -- urlpattern: https://github.com/web-platform-tests/wpt/tree/1b56d89a26/urlpattern +- urlpattern: https://github.com/web-platform-tests/wpt/tree/ef6d83d789/urlpattern - user-timing: https://github.com/web-platform-tests/wpt/tree/5ae85bf826/user-timing - wasm/jsapi: https://github.com/web-platform-tests/wpt/tree/cde25e7e3c/wasm/jsapi - wasm/webapi: https://github.com/web-platform-tests/wpt/tree/fd1b23eeaa/wasm/webapi diff --git a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json index 058079bb6d17ac..1d2ba25ff7d696 100644 --- a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json +++ b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json @@ -1121,6 +1121,63 @@ "hostname": { "input": "xn--caf-dma.com", "groups": {}} } + "pattern": ["http://\uD83D\uDEB2.com/"], + "inputs": ["http://\uD83D\uDEB2.com/"], + "exactly_empty_components": [ "port" ], + "protocol": "http", + "hostname": "xn--h78h.com", + "pathname": "/" + "protocol": { "input": "http", "groups": {}}, + "hostname": { "input": "xn--h78h.com", "groups": {}}, + "pathname": { "input": "/", "groups": {}} + "pattern": ["http://\uD83D \uDEB2"], + "pattern": [{"hostname":"\uD83D \uDEB2"}], + "pattern": [{"pathname":"\uD83D \uDEB2"}], + "pathname": "%EF%BF%BD%20%EF%BF%BD" + "pattern": [{"pathname":":\uD83D \uDEB2"}], + "pattern": [{"pathname":":a\uDB40\uDD00b"}], + "pathname": ":a\uDB40\uDD00b" + "inputs": [{"pathname":"test/foo"}], + "pathname": "test/:a\uD801\uDC50b" + "pathname": { 
"input": "test/foo", "groups": { "a\uD801\uDC50b": "foo" }} + "pattern": [{"pathname":":\uD83D\uDEB2"}], { "pattern": [{ "port": "" }], "inputs": [{ "protocol": "http", "port": "80" }], diff --git a/test/fixtures/wpt/versions.json b/test/fixtures/wpt/versions.json index 5ac57c34db0ed5..14b9e1c3ce488a 100644 --- a/test/fixtures/wpt/versions.json +++ b/test/fixtures/wpt/versions.json @@ -76,7 +76,7 @@ "path": "url" "urlpattern": { - "commit": "1b56d89a261b86dedfd2854b53c1732e435f1f57", "path": "urlpattern" "user-timing": {
[ "+ \"pattern\": [{\"pathname\":\"test/:a\\uD801\\uDC50b\"}],", "+ \"commit\": \"ef6d83d789483763207af8cedcbf1f3c1317b981\"," ]
[ 65, 90 ]
{ "additions": 59, "author": "nodejs-github-bot", "deletions": 2, "html_url": "https://github.com/nodejs/node/pull/56984", "issue_id": 56984, "merged_at": "2025-02-12T00:32:08Z", "omission_probability": 0.1, "pr_number": 56984, "repo": "nodejs/node", "title": "test: update WPT for urlpattern to ef6d83d789", "total_changes": 61 }
880
diff --git a/src/node_url_pattern.cc b/src/node_url_pattern.cc index 3d6340565d1e06..cbe1b473f0d54b 100644 --- a/src/node_url_pattern.cc +++ b/src/node_url_pattern.cc @@ -103,13 +103,14 @@ URLPatternRegexProvider::regex_search(std::string_view input, return std::nullopt; } + // V8 checks that the regexp exec result is one of the correct types. + DCHECK_IMPLIES(!entry->IsUndefined(), entry->IsString()); + if (entry->IsUndefined()) { result.emplace_back(std::nullopt); } else if (entry->IsString()) { Utf8Value utf8_entry(isolate, entry.As<String>()); result.emplace_back(utf8_entry.ToString()); - } else { - UNREACHABLE("v8::RegExp::Exec return a non-string, non-undefined value."); } } return result; @@ -184,7 +185,8 @@ void URLPattern::New(const FunctionCallbackInfo<Value>& args) { return; } } else { - THROW_ERR_MISSING_ARGS(env, "baseURL or options must be provided"); + THROW_ERR_INVALID_ARG_TYPE(env, + "second argument must be a string or object"); return; } diff --git a/test/parallel/test-urlpattern-types.js b/test/parallel/test-urlpattern-types.js new file mode 100644 index 00000000000000..71133a7f6e48ac --- /dev/null +++ b/test/parallel/test-urlpattern-types.js @@ -0,0 +1,46 @@ +'use strict'; + +require('../common'); + +const { URLPattern } = require('url'); +const { throws } = require('assert'); + +// Verifies that calling URLPattern with no new keyword throws. +throws(() => URLPattern(), { + code: 'ERR_CONSTRUCT_CALL_REQUIRED', +}); + +// Verifies that type checks are performed on the arguments. 
+throws(() => new URLPattern(1), { + code: 'ERR_INVALID_ARG_TYPE', +}); + +throws(() => new URLPattern({}, 1), { + code: 'ERR_INVALID_ARG_TYPE', +}); + +throws(() => new URLPattern({}, '', 1), { + code: 'ERR_INVALID_ARG_TYPE', +}); + +throws(() => new URLPattern({}, { ignoreCase: '' }), { + code: 'ERR_INVALID_ARG_TYPE', +}); + +const pattern = new URLPattern(); + +throws(() => pattern.exec(1), { + code: 'ERR_INVALID_ARG_TYPE', +}); + +throws(() => pattern.exec('', 1), { + code: 'ERR_INVALID_ARG_TYPE', +}); + +throws(() => pattern.test(1), { + code: 'ERR_INVALID_ARG_TYPE', +}); + +throws(() => pattern.test('', 1), { + code: 'ERR_INVALID_ARG_TYPE', +});
diff --git a/src/node_url_pattern.cc b/src/node_url_pattern.cc index 3d6340565d1e06..cbe1b473f0d54b 100644 --- a/src/node_url_pattern.cc +++ b/src/node_url_pattern.cc @@ -103,13 +103,14 @@ URLPatternRegexProvider::regex_search(std::string_view input, return std::nullopt; + DCHECK_IMPLIES(!entry->IsUndefined(), entry->IsString()); if (entry->IsUndefined()) { result.emplace_back(std::nullopt); } else if (entry->IsString()) { Utf8Value utf8_entry(isolate, entry.As<String>()); result.emplace_back(utf8_entry.ToString()); - } else { - UNREACHABLE("v8::RegExp::Exec return a non-string, non-undefined value."); } return result; @@ -184,7 +185,8 @@ void URLPattern::New(const FunctionCallbackInfo<Value>& args) { return; } } else { - THROW_ERR_MISSING_ARGS(env, "baseURL or options must be provided"); + THROW_ERR_INVALID_ARG_TYPE(env, + "second argument must be a string or object"); return; diff --git a/test/parallel/test-urlpattern-types.js b/test/parallel/test-urlpattern-types.js new file mode 100644 index 00000000000000..71133a7f6e48ac --- /dev/null +++ b/test/parallel/test-urlpattern-types.js @@ -0,0 +1,46 @@ +'use strict'; +const { URLPattern } = require('url'); +const { throws } = require('assert'); +// Verifies that calling URLPattern with no new keyword throws. +throws(() => URLPattern(), { + code: 'ERR_CONSTRUCT_CALL_REQUIRED', +// Verifies that type checks are performed on the arguments. +throws(() => new URLPattern(1), { +throws(() => new URLPattern({}, 1), { +throws(() => new URLPattern({}, { ignoreCase: '' }), { +const pattern = new URLPattern(); +throws(() => pattern.exec(1), { +throws(() => pattern.exec('', 1), { +throws(() => pattern.test(1), { +throws(() => pattern.test('', 1), {
[ "+ // V8 checks that the regexp exec result is one of the correct types.", "+require('../common');", "+throws(() => new URLPattern({}, '', 1), {" ]
[ 8, 39, 58 ]
{ "additions": 51, "author": "jasnell", "deletions": 3, "html_url": "https://github.com/nodejs/node/pull/56878", "issue_id": 56878, "merged_at": "2025-02-04T04:52:39Z", "omission_probability": 0.1, "pr_number": 56878, "repo": "nodejs/node", "title": "src,test: expand test coverage for urlpattern and fix error", "total_changes": 54 }
881
diff --git a/benchmark/url/urlpattern-parse.js b/benchmark/url/urlpattern-parse.js new file mode 100644 index 00000000000000..65bfc90613006c --- /dev/null +++ b/benchmark/url/urlpattern-parse.js @@ -0,0 +1,28 @@ +'use strict'; +const common = require('../common.js'); +const { URLPattern } = require('url'); +const { ok } = require('assert'); + +const tests = [ + 'https://(sub.)?example(.com/)foo', + { 'hostname': 'xn--caf-dma.com' }, + { 'pathname': '/foo', 'search': 'bar', 'hash': 'baz', + 'baseURL': 'https://example.com:8080' }, + { 'pathname': '/([[a-z]--a])' }, +]; + +const bench = common.createBenchmark(main, { + pattern: tests.map(JSON.stringify), + n: [1e5], +}); + +function main({ pattern, n }) { + const inputPattern = JSON.parse(pattern); + let deadcode; + bench.start(); + for (let i = 0; i < n; i += 1) { + deadcode = new URLPattern(inputPattern); + } + bench.end(n); + ok(deadcode); +} diff --git a/benchmark/url/urlpattern-test.js b/benchmark/url/urlpattern-test.js new file mode 100644 index 00000000000000..445380748b3239 --- /dev/null +++ b/benchmark/url/urlpattern-test.js @@ -0,0 +1,30 @@ +'use strict'; +const common = require('../common.js'); +const { URLPattern } = require('url'); +const { notStrictEqual } = require('assert'); + +const tests = [ + 'https://(sub.)?example(.com/)foo', + { 'hostname': 'xn--caf-dma.com' }, + { 'pathname': '/foo', 'search': 'bar', 'hash': 'baz', + 'baseURL': 'https://example.com:8080' }, + { 'pathname': '/([[a-z]--a])' }, +]; + +const bench = common.createBenchmark(main, { + pattern: tests.map(JSON.stringify), + n: [1e5], +}); + +function main({ pattern, n }) { + const inputPattern = JSON.parse(pattern); + const urlpattern = new URLPattern(inputPattern); + + let deadcode; + bench.start(); + for (let i = 0; i < n; i += 1) { + deadcode = urlpattern.test('https://sub.example.com/foo'); + } + bench.end(n); + notStrictEqual(deadcode, undefined); // We don't care if it is true or false. +}
diff --git a/benchmark/url/urlpattern-parse.js b/benchmark/url/urlpattern-parse.js index 00000000000000..65bfc90613006c +++ b/benchmark/url/urlpattern-parse.js @@ -0,0 +1,28 @@ +const { ok } = require('assert'); + deadcode = new URLPattern(inputPattern); + ok(deadcode); diff --git a/benchmark/url/urlpattern-test.js b/benchmark/url/urlpattern-test.js index 00000000000000..445380748b3239 +++ b/benchmark/url/urlpattern-test.js @@ -0,0 +1,30 @@ +const { notStrictEqual } = require('assert'); + const urlpattern = new URLPattern(inputPattern); + deadcode = urlpattern.test('https://sub.example.com/foo'); + notStrictEqual(deadcode, undefined); // We don't care if it is true or false.
[]
[]
{ "additions": 58, "author": "jasnell", "deletions": 0, "html_url": "https://github.com/nodejs/node/pull/56882", "issue_id": 56882, "merged_at": "2025-02-04T17:57:46Z", "omission_probability": 0.1, "pr_number": 56882, "repo": "nodejs/node", "title": "benchmarks: add simple parse and test benchmarks for URLPattern", "total_changes": 58 }
882
diff --git a/src/node_url_pattern.cc b/src/node_url_pattern.cc index 3d6340565d1e06..8c09b3fbb6a492 100644 --- a/src/node_url_pattern.cc +++ b/src/node_url_pattern.cc @@ -26,6 +26,7 @@ using v8::Object; using v8::PropertyAttribute; using v8::ReadOnly; using v8::RegExp; +using v8::Signature; using v8::String; using v8::Value; @@ -720,58 +721,71 @@ static void Initialize(Local<Object> target, auto prototype_template = ctor_tmpl->PrototypeTemplate(); ctor_tmpl->SetClassName(FIXED_ONE_BYTE_STRING(isolate, "URLPattern")); + // The signature is used to prevent the property accessors from being + // called on the wrong receiver object (`this`) + auto signature = Signature::New(isolate, ctor_tmpl); + instance_template->SetInternalFieldCount(URLPattern::kInternalFieldCount); prototype_template->SetAccessorProperty( env->protocol_string(), - FunctionTemplate::New(isolate, URLPattern::Protocol), + FunctionTemplate::New( + isolate, URLPattern::Protocol, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); prototype_template->SetAccessorProperty( env->username_string(), - FunctionTemplate::New(isolate, URLPattern::Username), + FunctionTemplate::New( + isolate, URLPattern::Username, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); prototype_template->SetAccessorProperty( env->password_string(), - FunctionTemplate::New(isolate, URLPattern::Password), + FunctionTemplate::New( + isolate, URLPattern::Password, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); prototype_template->SetAccessorProperty( env->hostname_string(), - FunctionTemplate::New(isolate, URLPattern::Hostname), + FunctionTemplate::New( + isolate, URLPattern::Hostname, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); prototype_template->SetAccessorProperty( env->port_string(), - FunctionTemplate::New(isolate, URLPattern::Port), + FunctionTemplate::New( + isolate, URLPattern::Port, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); 
prototype_template->SetAccessorProperty( env->pathname_string(), - FunctionTemplate::New(isolate, URLPattern::Pathname), + FunctionTemplate::New( + isolate, URLPattern::Pathname, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); prototype_template->SetAccessorProperty( env->search_string(), - FunctionTemplate::New(isolate, URLPattern::Search), + FunctionTemplate::New( + isolate, URLPattern::Search, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); prototype_template->SetAccessorProperty( env->hash_string(), - FunctionTemplate::New(isolate, URLPattern::Hash), + FunctionTemplate::New( + isolate, URLPattern::Hash, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); prototype_template->SetAccessorProperty( env->has_regexp_groups_string(), - FunctionTemplate::New(isolate, URLPattern::HasRegexpGroups), + FunctionTemplate::New( + isolate, URLPattern::HasRegexpGroups, Local<Value>(), signature), Local<FunctionTemplate>(), attributes); diff --git a/test/parallel/test-urlpattern-invalidthis.js b/test/parallel/test-urlpattern-invalidthis.js new file mode 100644 index 00000000000000..fa4504199df516 --- /dev/null +++ b/test/parallel/test-urlpattern-invalidthis.js @@ -0,0 +1,40 @@ +'use strict'; + +require('../common'); + +const { URLPattern } = require('url'); +const { throws } = require('assert'); + +const pattern = new URLPattern(); +const proto = Object.getPrototypeOf(pattern); + +// Verifies that attempts to call the property getters on a URLPattern +// with the incorrect `this` will not crash the process. 
+[ + 'protocol', + 'username', + 'password', + 'hostname', + 'port', + 'pathname', + 'search', + 'hash', + 'hasRegExpGroups', +].forEach((i) => { + const prop = Object.getOwnPropertyDescriptor(proto, i).get; + throws(() => prop({}), { + message: 'Illegal invocation', + }, i); +}); + +// Verifies that attempts to call the exec and test functions +// with the wrong this also throw + +const { test, exec } = pattern; + +throws(() => test({}), { + message: 'Illegal invocation', +}); +throws(() => exec({}), { + message: 'Illegal invocation', +});
diff --git a/src/node_url_pattern.cc b/src/node_url_pattern.cc index 3d6340565d1e06..8c09b3fbb6a492 100644 --- a/src/node_url_pattern.cc +++ b/src/node_url_pattern.cc @@ -26,6 +26,7 @@ using v8::Object; using v8::PropertyAttribute; using v8::ReadOnly; using v8::RegExp; +using v8::Signature; using v8::String; using v8::Value; @@ -720,58 +721,71 @@ static void Initialize(Local<Object> target, auto prototype_template = ctor_tmpl->PrototypeTemplate(); ctor_tmpl->SetClassName(FIXED_ONE_BYTE_STRING(isolate, "URLPattern")); + // The signature is used to prevent the property accessors from being + // called on the wrong receiver object (`this`) + auto signature = Signature::New(isolate, ctor_tmpl); instance_template->SetInternalFieldCount(URLPattern::kInternalFieldCount); env->protocol_string(), - FunctionTemplate::New(isolate, URLPattern::Protocol), + isolate, URLPattern::Protocol, Local<Value>(), signature), env->username_string(), - FunctionTemplate::New(isolate, URLPattern::Username), + isolate, URLPattern::Username, Local<Value>(), signature), env->password_string(), - FunctionTemplate::New(isolate, URLPattern::Password), + isolate, URLPattern::Password, Local<Value>(), signature), env->hostname_string(), - FunctionTemplate::New(isolate, URLPattern::Hostname), + isolate, URLPattern::Hostname, Local<Value>(), signature), env->port_string(), - FunctionTemplate::New(isolate, URLPattern::Port), + isolate, URLPattern::Port, Local<Value>(), signature), env->pathname_string(), + isolate, URLPattern::Pathname, Local<Value>(), signature), env->search_string(), - FunctionTemplate::New(isolate, URLPattern::Search), + isolate, URLPattern::Search, Local<Value>(), signature), env->hash_string(), - FunctionTemplate::New(isolate, URLPattern::Hash), + isolate, URLPattern::Hash, Local<Value>(), signature), env->has_regexp_groups_string(), - FunctionTemplate::New(isolate, URLPattern::HasRegexpGroups), + isolate, URLPattern::HasRegexpGroups, Local<Value>(), signature), diff --git 
a/test/parallel/test-urlpattern-invalidthis.js b/test/parallel/test-urlpattern-invalidthis.js new file mode 100644 index 00000000000000..fa4504199df516 --- /dev/null +++ b/test/parallel/test-urlpattern-invalidthis.js @@ -0,0 +1,40 @@ +'use strict'; +const { throws } = require('assert'); +const pattern = new URLPattern(); +const proto = Object.getPrototypeOf(pattern); +// Verifies that attempts to call the property getters on a URLPattern +// with the incorrect `this` will not crash the process. +[ + 'protocol', + 'username', + 'password', + 'hostname', + 'pathname', + 'search', + 'hasRegExpGroups', +].forEach((i) => { + const prop = Object.getOwnPropertyDescriptor(proto, i).get; + throws(() => prop({}), { + message: 'Illegal invocation', + }, i); +// Verifies that attempts to call the exec and test functions +// with the wrong this also throw +const { test, exec } = pattern; +throws(() => test({}), { +throws(() => exec({}), {
[ "- FunctionTemplate::New(isolate, URLPattern::Pathname),", "+require('../common');", "+const { URLPattern } = require('url');", "+ 'port',", "+ 'hash'," ]
[ 63, 101, 103, 116, 119 ]
{ "additions": 63, "author": "jasnell", "deletions": 9, "html_url": "https://github.com/nodejs/node/pull/56877", "issue_id": 56877, "merged_at": "2025-02-04T03:34:31Z", "omission_probability": 0.1, "pr_number": 56877, "repo": "nodejs/node", "title": "src: prevent URLPattern property getters from crashing with invalid this", "total_changes": 72 }
883
diff --git a/deps/ada/ada.cpp b/deps/ada/ada.cpp index c9fa03b1a7b8f0..a60102854930be 100644 --- a/deps/ada/ada.cpp +++ b/deps/ada/ada.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2025-01-30 18:48:55 -0500. Do not edit! */ +/* auto-generated on 2025-02-11 09:47:50 -0500. Do not edit! */ /* begin file src/ada.cpp */ #include "ada.h" /* begin file src/checkers.cpp */ diff --git a/deps/ada/ada.h b/deps/ada/ada.h index c32e3cfaa03201..c997f0981ce36b 100644 --- a/deps/ada/ada.h +++ b/deps/ada/ada.h @@ -1,4 +1,4 @@ -/* auto-generated on 2025-01-30 18:48:55 -0500. Do not edit! */ +/* auto-generated on 2025-02-11 09:47:50 -0500. Do not edit! */ /* begin file include/ada.h */ /** * @file ada.h @@ -1219,6 +1219,7 @@ constexpr ada::scheme::type get_scheme_type(std::string_view scheme) noexcept; #endif // ADA_SCHEME_H /* end file include/ada/scheme.h */ +#include <string> #include <string_view> namespace ada { @@ -1352,6 +1353,7 @@ struct url_base { #endif /* end file include/ada/url_base.h */ +#include <string> #include <string_view> #include <optional> @@ -4118,6 +4120,9 @@ void swap(expected<T, E> &lhs, #ifndef ADA_URL_PATTERN_REGEX_H #define ADA_URL_PATTERN_REGEX_H +#include <string> +#include <string_view> + #ifdef ADA_USE_UNSAFE_STD_REGEX_PROVIDER #include <regex> #endif // ADA_USE_UNSAFE_STD_REGEX_PROVIDER @@ -4216,7 +4221,9 @@ concept url_pattern_encoding_callback = requires(F f, std::string_view sv) { // either a string or a URLPatternInit struct. If a string is given, // it will be parsed to create a URLPatternInit. The URLPatternInit // API is defined as part of the URLPattern specification. +// All provided strings must be valid UTF-8. struct url_pattern_init { + // All strings must be valid UTF-8. 
// @see https://urlpattern.spec.whatwg.org/#process-a-urlpatterninit static tl::expected<url_pattern_init, errors> process( url_pattern_init init, std::string_view type, @@ -4276,15 +4283,23 @@ struct url_pattern_init { #endif // ADA_TESTING bool operator==(const url_pattern_init&) const; - + // If present, must be valid UTF-8. std::optional<std::string> protocol{}; + // If present, must be valid UTF-8. std::optional<std::string> username{}; + // If present, must be valid UTF-8. std::optional<std::string> password{}; + // If present, must be valid UTF-8. std::optional<std::string> hostname{}; + // If present, must be valid UTF-8. std::optional<std::string> port{}; + // If present, must be valid UTF-8. std::optional<std::string> pathname{}; + // If present, must be valid UTF-8. std::optional<std::string> search{}; + // If present, must be valid UTF-8. std::optional<std::string> hash{}; + // If present, must be valid UTF-8. std::optional<std::string> base_url{}; }; } // namespace ada @@ -4366,6 +4381,7 @@ tl::expected<url_pattern<regex_provider>, errors> parse_url_pattern_impl( #ifndef ADA_IMPLEMENTATION_H #define ADA_IMPLEMENTATION_H +#include <string> #include <string_view> #include <optional> @@ -4379,6 +4395,7 @@ tl::expected<url_pattern<regex_provider>, errors> parse_url_pattern_impl( #include <algorithm> #include <optional> +#include <ostream> #include <string> #include <string_view> @@ -5040,7 +5057,9 @@ std::string href_from_file(std::string_view path); #endif // ADA_IMPLEMENTATION_H /* end file include/ada/implementation.h */ +#include <ostream> #include <string> +#include <string_view> #include <unordered_map> #include <variant> #include <vector> @@ -5219,6 +5238,8 @@ class url_pattern_component { bool has_regexp_groups = false; }; +// A URLPattern input can be either a string or a URLPatternInit object. +// If it is a string, it must be a valid UTF-8 string. 
using url_pattern_input = std::variant<std::string_view, url_pattern_init>; // A struct providing the URLPattern matching results for all @@ -5251,12 +5272,16 @@ struct url_pattern_options { // defined in https://wicg.github.io/urlpattern. // More information about the URL Pattern syntax can be found at // https://developer.mozilla.org/en-US/docs/Web/API/URL_Pattern_API +// +// We require all strings to be valid UTF-8: it is the user's responsibility +// to ensure that the provided strings are valid UTF-8. template <url_pattern_regex::regex_concept regex_provider> class url_pattern { public: url_pattern() = default; /** + * If non-null, base_url must pointer at a valid UTF-8 string. * @see https://urlpattern.spec.whatwg.org/#dom-urlpattern-exec */ result<std::optional<url_pattern_result>> exec( @@ -5264,6 +5289,7 @@ class url_pattern { const std::string_view* base_url = nullptr); /** + * If non-null, base_url must pointer at a valid UTF-8 string. * @see https://urlpattern.spec.whatwg.org/#dom-urlpattern-test */ result<bool> test(const url_pattern_input& input, @@ -5725,6 +5751,7 @@ std::string generate_segment_wildcard_regexp( #endif /* end file include/ada/url_pattern_helpers.h */ +#include <string> #include <string_view> #include <variant> @@ -6257,6 +6284,7 @@ ada_warn_unused std::string to_string(ada::state s); #include <string> +#include <string_view> #include <optional> /** @@ -6870,6 +6898,7 @@ namespace ada { #ifndef ADA_URL_AGGREGATOR_H #define ADA_URL_AGGREGATOR_H +#include <ostream> #include <string> #include <string_view> #include <variant> @@ -7255,6 +7284,7 @@ ada_really_inline size_t percent_encode_index(const std::string_view input, /* end file include/ada/unicode-inl.h */ #include <charconv> +#include <ostream> #include <string_view> namespace ada { @@ -8406,6 +8436,8 @@ using url_search_params_entries_iter = url_search_params_iter_type::ENTRIES>; /** + * We require all strings to be valid UTF-8. 
It is the user's responsibility to + * ensure that the provided strings are valid UTF-8. * @see https://url.spec.whatwg.org/#interface-urlsearchparams */ struct url_search_params { @@ -8428,6 +8460,7 @@ struct url_search_params { [[nodiscard]] inline size_t size() const noexcept; /** + * Both key and value must be valid UTF-8. * @see https://url.spec.whatwg.org/#dom-urlsearchparams-append */ inline void append(std::string_view key, std::string_view value); @@ -8455,6 +8488,7 @@ struct url_search_params { inline bool has(std::string_view key, std::string_view value) noexcept; /** + * Both key and value must be valid UTF-8. * @see https://url.spec.whatwg.org/#dom-urlsearchparams-set */ inline void set(std::string_view key, std::string_view value); @@ -8518,6 +8552,7 @@ struct url_search_params { std::vector<key_value_pair> params{}; /** + * The init parameter must be valid UTF-8. * @see https://url.spec.whatwg.org/#concept-urlencoded-parser */ void initialize(std::string_view init); @@ -8724,10 +8759,80 @@ inline void url_search_params::remove(const std::string_view key, } inline void url_search_params::sort() { - std::ranges::stable_sort( - params, [](const key_value_pair &lhs, const key_value_pair &rhs) { - return lhs.first < rhs.first; - }); + // We rely on the fact that the content is valid UTF-8. 
+ std::ranges::stable_sort(params, [](const key_value_pair &lhs, + const key_value_pair &rhs) { + size_t i = 0, j = 0; + uint32_t low_surrogate1 = 0, low_surrogate2 = 0; + while ((i < lhs.first.size() || low_surrogate1 != 0) && + (j < rhs.first.size() || low_surrogate2 != 0)) { + uint32_t codePoint1 = 0, codePoint2 = 0; + + if (low_surrogate1 != 0) { + codePoint1 = low_surrogate1; + low_surrogate1 = 0; + } else { + uint8_t c1 = uint8_t(lhs.first[i]); + if (c1 <= 0x7F) { + codePoint1 = c1; + i++; + } else if (c1 <= 0xDF) { + codePoint1 = ((c1 & 0x1F) << 6) | (uint8_t(lhs.first[i + 1]) & 0x3F); + i += 2; + } else if (c1 <= 0xEF) { + codePoint1 = ((c1 & 0x0F) << 12) | + ((uint8_t(lhs.first[i + 1]) & 0x3F) << 6) | + (uint8_t(lhs.first[i + 2]) & 0x3F); + i += 3; + } else { + codePoint1 = ((c1 & 0x07) << 18) | + ((uint8_t(lhs.first[i + 1]) & 0x3F) << 12) | + ((uint8_t(lhs.first[i + 2]) & 0x3F) << 6) | + (uint8_t(lhs.first[i + 3]) & 0x3F); + i += 4; + + codePoint1 -= 0x10000; + uint16_t high_surrogate = uint16_t(0xD800 + (codePoint1 >> 10)); + low_surrogate1 = uint16_t(0xDC00 + (codePoint1 & 0x3FF)); + codePoint1 = high_surrogate; + } + } + + if (low_surrogate2 != 0) { + codePoint2 = low_surrogate2; + low_surrogate2 = 0; + } else { + uint8_t c2 = uint8_t(rhs.first[j]); + if (c2 <= 0x7F) { + codePoint2 = c2; + j++; + } else if (c2 <= 0xDF) { + codePoint2 = ((c2 & 0x1F) << 6) | (uint8_t(rhs.first[j + 1]) & 0x3F); + j += 2; + } else if (c2 <= 0xEF) { + codePoint2 = ((c2 & 0x0F) << 12) | + ((uint8_t(rhs.first[j + 1]) & 0x3F) << 6) | + (uint8_t(rhs.first[j + 2]) & 0x3F); + j += 3; + } else { + codePoint2 = ((c2 & 0x07) << 18) | + ((uint8_t(rhs.first[j + 1]) & 0x3F) << 12) | + ((uint8_t(rhs.first[j + 2]) & 0x3F) << 6) | + (uint8_t(rhs.first[j + 3]) & 0x3F); + j += 4; + codePoint2 -= 0x10000; + uint16_t high_surrogate = uint16_t(0xD800 + (codePoint2 >> 10)); + low_surrogate2 = uint16_t(0xDC00 + (codePoint2 & 0x3FF)); + codePoint2 = high_surrogate; + } + } + + if (codePoint1 != 
codePoint2) { + return (codePoint1 < codePoint2); + } + } + return (j < rhs.first.size() || low_surrogate2 != 0); + }); } inline url_search_params_keys_iter url_search_params::get_keys() { @@ -10330,14 +10435,14 @@ constructor_string_parser<regex_provider>::parse(std::string_view input) { #ifndef ADA_ADA_VERSION_H #define ADA_ADA_VERSION_H -#define ADA_VERSION "3.0.1" +#define ADA_VERSION "3.1.0" namespace ada { enum { ADA_VERSION_MAJOR = 3, - ADA_VERSION_MINOR = 0, - ADA_VERSION_REVISION = 1, + ADA_VERSION_MINOR = 1, + ADA_VERSION_REVISION = 0, }; } // namespace ada
diff --git a/deps/ada/ada.cpp b/deps/ada/ada.cpp index c9fa03b1a7b8f0..a60102854930be 100644 --- a/deps/ada/ada.cpp +++ b/deps/ada/ada.cpp /* begin file src/ada.cpp */ #include "ada.h" /* begin file src/checkers.cpp */ diff --git a/deps/ada/ada.h b/deps/ada/ada.h index c32e3cfaa03201..c997f0981ce36b 100644 --- a/deps/ada/ada.h +++ b/deps/ada/ada.h /* begin file include/ada.h */ * @file ada.h @@ -1219,6 +1219,7 @@ constexpr ada::scheme::type get_scheme_type(std::string_view scheme) noexcept; #endif // ADA_SCHEME_H /* end file include/ada/scheme.h */ @@ -1352,6 +1353,7 @@ struct url_base { /* end file include/ada/url_base.h */ @@ -4118,6 +4120,9 @@ void swap(expected<T, E> &lhs, #ifndef ADA_URL_PATTERN_REGEX_H #define ADA_URL_PATTERN_REGEX_H #ifdef ADA_USE_UNSAFE_STD_REGEX_PROVIDER #include <regex> #endif // ADA_USE_UNSAFE_STD_REGEX_PROVIDER @@ -4216,7 +4221,9 @@ concept url_pattern_encoding_callback = requires(F f, std::string_view sv) { // either a string or a URLPatternInit struct. If a string is given, // it will be parsed to create a URLPatternInit. The URLPatternInit // API is defined as part of the URLPattern specification. struct url_pattern_init { + // All strings must be valid UTF-8. 
// @see https://urlpattern.spec.whatwg.org/#process-a-urlpatterninit static tl::expected<url_pattern_init, errors> process( url_pattern_init init, std::string_view type, @@ -4276,15 +4283,23 @@ struct url_pattern_init { #endif // ADA_TESTING bool operator==(const url_pattern_init&) const; - std::optional<std::string> protocol{}; std::optional<std::string> username{}; std::optional<std::string> password{}; std::optional<std::string> hostname{}; std::optional<std::string> port{}; std::optional<std::string> pathname{}; std::optional<std::string> search{}; std::optional<std::string> hash{}; std::optional<std::string> base_url{}; @@ -4366,6 +4381,7 @@ tl::expected<url_pattern<regex_provider>, errors> parse_url_pattern_impl( #ifndef ADA_IMPLEMENTATION_H #define ADA_IMPLEMENTATION_H @@ -4379,6 +4395,7 @@ tl::expected<url_pattern<regex_provider>, errors> parse_url_pattern_impl( #include <algorithm> @@ -5040,7 +5057,9 @@ std::string href_from_file(std::string_view path); #endif // ADA_IMPLEMENTATION_H /* end file include/ada/implementation.h */ #include <unordered_map> #include <vector> @@ -5219,6 +5238,8 @@ class url_pattern_component { bool has_regexp_groups = false; +// A URLPattern input can be either a string or a URLPatternInit object. +// If it is a string, it must be a valid UTF-8 string. using url_pattern_input = std::variant<std::string_view, url_pattern_init>; // A struct providing the URLPattern matching results for all @@ -5251,12 +5272,16 @@ struct url_pattern_options { // defined in https://wicg.github.io/urlpattern. // More information about the URL Pattern syntax can be found at // https://developer.mozilla.org/en-US/docs/Web/API/URL_Pattern_API +// We require all strings to be valid UTF-8: it is the user's responsibility +// to ensure that the provided strings are valid UTF-8. 
template <url_pattern_regex::regex_concept regex_provider> class url_pattern { public: url_pattern() = default; * @see https://urlpattern.spec.whatwg.org/#dom-urlpattern-exec result<std::optional<url_pattern_result>> exec( @@ -5264,6 +5289,7 @@ class url_pattern { const std::string_view* base_url = nullptr); * @see https://urlpattern.spec.whatwg.org/#dom-urlpattern-test result<bool> test(const url_pattern_input& input, @@ -5725,6 +5751,7 @@ std::string generate_segment_wildcard_regexp( /* end file include/ada/url_pattern_helpers.h */ @@ -6257,6 +6284,7 @@ ada_warn_unused std::string to_string(ada::state s); @@ -6870,6 +6898,7 @@ namespace ada { #ifndef ADA_URL_AGGREGATOR_H #define ADA_URL_AGGREGATOR_H @@ -7255,6 +7284,7 @@ ada_really_inline size_t percent_encode_index(const std::string_view input, /* end file include/ada/unicode-inl.h */ #include <charconv> @@ -8406,6 +8436,8 @@ using url_search_params_entries_iter = url_search_params_iter_type::ENTRIES>; + * We require all strings to be valid UTF-8. It is the user's responsibility to + * ensure that the provided strings are valid UTF-8. * @see https://url.spec.whatwg.org/#interface-urlsearchparams */ struct url_search_params { @@ -8428,6 +8460,7 @@ struct url_search_params { [[nodiscard]] inline size_t size() const noexcept; * @see https://url.spec.whatwg.org/#dom-urlsearchparams-append inline void append(std::string_view key, std::string_view value); @@ -8455,6 +8488,7 @@ struct url_search_params { inline bool has(std::string_view key, std::string_view value) noexcept; * @see https://url.spec.whatwg.org/#dom-urlsearchparams-set inline void set(std::string_view key, std::string_view value); @@ -8518,6 +8552,7 @@ struct url_search_params { std::vector<key_value_pair> params{}; + * The init parameter must be valid UTF-8. 
* @see https://url.spec.whatwg.org/#concept-urlencoded-parser void initialize(std::string_view init); @@ -8724,10 +8759,80 @@ inline void url_search_params::remove(const std::string_view key, inline void url_search_params::sort() { - return lhs.first < rhs.first; - }); + // We rely on the fact that the content is valid UTF-8. + std::ranges::stable_sort(params, [](const key_value_pair &lhs, + const key_value_pair &rhs) { + uint32_t low_surrogate1 = 0, low_surrogate2 = 0; + while ((i < lhs.first.size() || low_surrogate1 != 0) && + uint32_t codePoint1 = 0, codePoint2 = 0; + if (low_surrogate1 != 0) { + codePoint1 = low_surrogate1; + uint8_t c1 = uint8_t(lhs.first[i]); + if (c1 <= 0x7F) { + codePoint1 = c1; + i++; + } else if (c1 <= 0xDF) { + codePoint1 = ((c1 & 0x1F) << 6) | (uint8_t(lhs.first[i + 1]) & 0x3F); + i += 2; + } else if (c1 <= 0xEF) { + codePoint1 = ((c1 & 0x0F) << 12) | + ((uint8_t(lhs.first[i + 1]) & 0x3F) << 6) | + (uint8_t(lhs.first[i + 2]) & 0x3F); + i += 3; + codePoint1 = ((c1 & 0x07) << 18) | + ((uint8_t(lhs.first[i + 1]) & 0x3F) << 12) | + ((uint8_t(lhs.first[i + 2]) & 0x3F) << 6) | + (uint8_t(lhs.first[i + 3]) & 0x3F); + i += 4; + codePoint1 -= 0x10000; + uint16_t high_surrogate = uint16_t(0xD800 + (codePoint1 >> 10)); + low_surrogate1 = uint16_t(0xDC00 + (codePoint1 & 0x3FF)); + codePoint1 = high_surrogate; + if (low_surrogate2 != 0) { + codePoint2 = low_surrogate2; + low_surrogate2 = 0; + uint8_t c2 = uint8_t(rhs.first[j]); + if (c2 <= 0x7F) { + codePoint2 = c2; + j++; + } else if (c2 <= 0xDF) { + codePoint2 = ((c2 & 0x1F) << 6) | (uint8_t(rhs.first[j + 1]) & 0x3F); + j += 2; + } else if (c2 <= 0xEF) { + codePoint2 = ((c2 & 0x0F) << 12) | + ((uint8_t(rhs.first[j + 1]) & 0x3F) << 6) | + (uint8_t(rhs.first[j + 2]) & 0x3F); + j += 3; + ((uint8_t(rhs.first[j + 1]) & 0x3F) << 12) | + ((uint8_t(rhs.first[j + 2]) & 0x3F) << 6) | + (uint8_t(rhs.first[j + 3]) & 0x3F); + j += 4; + codePoint2 -= 0x10000; + uint16_t high_surrogate = uint16_t(0xD800 + 
(codePoint2 >> 10)); + low_surrogate2 = uint16_t(0xDC00 + (codePoint2 & 0x3FF)); + codePoint2 = high_surrogate; + if (codePoint1 != codePoint2) { + return (codePoint1 < codePoint2); + return (j < rhs.first.size() || low_surrogate2 != 0); + }); inline url_search_params_keys_iter url_search_params::get_keys() { @@ -10330,14 +10435,14 @@ constructor_string_parser<regex_provider>::parse(std::string_view input) { #ifndef ADA_ADA_VERSION_H #define ADA_ADA_VERSION_H -#define ADA_VERSION "3.0.1" enum { ADA_VERSION_MAJOR = 3, - ADA_VERSION_MINOR = 0, - ADA_VERSION_REVISION = 1, + ADA_VERSION_MINOR = 1, + ADA_VERSION_REVISION = 0,
[ "+// All provided strings must be valid UTF-8.", "+//", "- std::ranges::stable_sort(", "- params, [](const key_value_pair &lhs, const key_value_pair &rhs) {", "+ size_t i = 0, j = 0;", "+ (j < rhs.first.size() || low_surrogate2 != 0)) {", "+ low_surrogate1 = 0;", "+ codePoint2 = ((c2 & 0x07) << 18) |", "+ }", "+#define ADA_VERSION \"3.1.0\"" ]
[ 50, 120, 210, 211, 217, 220, 225, 270, 285, 296 ]
{ "additions": 115, "author": "nodejs-github-bot", "deletions": 10, "html_url": "https://github.com/nodejs/node/pull/57083", "issue_id": 57083, "merged_at": "2025-02-20T18:10:42Z", "omission_probability": 0.1, "pr_number": 57083, "repo": "nodejs/node", "title": "deps: update ada to 3.1.0", "total_changes": 125 }
884
diff --git a/test/fixtures/wpt/README.md b/test/fixtures/wpt/README.md index e464af4932d4a1..a528d2035ccaf5 100644 --- a/test/fixtures/wpt/README.md +++ b/test/fixtures/wpt/README.md @@ -29,7 +29,7 @@ Last update: - resources: https://github.com/web-platform-tests/wpt/tree/1e140d63ec/resources - streams: https://github.com/web-platform-tests/wpt/tree/bc9dcbbf1a/streams - url: https://github.com/web-platform-tests/wpt/tree/a23788b77a/url -- urlpattern: https://github.com/web-platform-tests/wpt/tree/ef6d83d789/urlpattern +- urlpattern: https://github.com/web-platform-tests/wpt/tree/3b6b19853a/urlpattern - user-timing: https://github.com/web-platform-tests/wpt/tree/5ae85bf826/user-timing - wasm/jsapi: https://github.com/web-platform-tests/wpt/tree/cde25e7e3c/wasm/jsapi - wasm/webapi: https://github.com/web-platform-tests/wpt/tree/fd1b23eeaa/wasm/webapi diff --git a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json index 1d2ba25ff7d696..3d5ddce1eab21e 100644 --- a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json +++ b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json @@ -1202,6 +1202,14 @@ { "pattern": [{ "protocol": "http", "port": "80 " }], "inputs": [{ "protocol": "http", "port": "80" }], + "exactly_empty_components": ["port"], + "expected_match": { + "protocol": { "input": "http", "groups": {} } + } + }, + { + "pattern": [{ "protocol": "http", "port": "100000" }], + "inputs": [{ "protocol": "http", "port": "100000" }], "expected_obj": "error" }, { @@ -2424,7 +2432,11 @@ }, { "pattern": [{ "hostname": "bad#hostname" }], - "expected_obj": "error" + "inputs": [{ "hostname": "bad" }], + "exactly_empty_components": ["port"], + "expected_match": { + "hostname": { "input": "bad", "groups": {} } + } }, { "pattern": [{ "hostname": "bad%hostname" }], @@ -2432,7 +2444,10 @@ }, { "pattern": [{ "hostname": "bad/hostname" }], - "expected_obj": "error" + "inputs": [{ 
"hostname": "bad" }], + "expected_match": { + "hostname": { "input": "bad", "groups": {} } + } }, { "pattern": [{ "hostname": "bad\\:hostname" }], @@ -2464,7 +2479,8 @@ }, { "pattern": [{ "hostname": "bad\\\\hostname" }], - "expected_obj": "error" + "inputs": [{ "hostname": "badhostname" }], + "expected_match": null }, { "pattern": [{ "hostname": "bad^hostname" }], @@ -2476,15 +2492,24 @@ }, { "pattern": [{ "hostname": "bad\nhostname" }], - "expected_obj": "error" + "inputs": [{ "hostname": "badhostname" }], + "expected_match": { + "hostname": { "input": "badhostname", "groups": {} } + } }, { "pattern": [{ "hostname": "bad\rhostname" }], - "expected_obj": "error" + "inputs": [{ "hostname": "badhostname" }], + "expected_match": { + "hostname": { "input": "badhostname", "groups": {} } + } }, { "pattern": [{ "hostname": "bad\thostname" }], - "expected_obj": "error" + "inputs": [{ "hostname": "badhostname" }], + "expected_match": { + "hostname": { "input": "badhostname", "groups": {} } + } }, { "pattern": [{}], diff --git a/test/fixtures/wpt/versions.json b/test/fixtures/wpt/versions.json index 413aa54e8b8027..589996f58a81b0 100644 --- a/test/fixtures/wpt/versions.json +++ b/test/fixtures/wpt/versions.json @@ -76,7 +76,7 @@ "path": "url" }, "urlpattern": { - "commit": "ef6d83d789483763207af8cedcbf1f3c1317b981", + "commit": "3b6b19853a928ec9bfa28e9046c3cf601f160e42", "path": "urlpattern" }, "user-timing": { diff --git a/test/wpt/status/urlpattern.json b/test/wpt/status/urlpattern.json index 58877b85017280..6df26fd66a7d2b 100644 --- a/test/wpt/status/urlpattern.json +++ b/test/wpt/status/urlpattern.json @@ -11,28 +11,26 @@ "urlpattern.any.js": { "fail": { "expected": [ - "Pattern: [{\"protocol\":\"http\",\"port\":\"80 \"}] Inputs: [{\"protocol\":\"http\",\"port\":\"80\"}]", "Pattern: [\"https://{sub.}?example{.com/}foo\"] Inputs: [\"https://example.com/foo\"]", - "Pattern: [{\"hostname\":\"bad#hostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad/hostname\"}] 
Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\\\\\\\hostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\nhostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\rhostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\thostname\"}] Inputs: undefined" + "Pattern: [{\"hostname\":\"bad#hostname\"}] Inputs: [{\"hostname\":\"bad\"}]", + "Pattern: [{\"hostname\":\"bad/hostname\"}] Inputs: [{\"hostname\":\"bad\"}]", + "Pattern: [{\"hostname\":\"bad\\\\\\\\hostname\"}] Inputs: [{\"hostname\":\"badhostname\"}]", + "Pattern: [{\"hostname\":\"bad\\nhostname\"}] Inputs: [{\"hostname\":\"badhostname\"}]", + "Pattern: [{\"hostname\":\"bad\\rhostname\"}] Inputs: [{\"hostname\":\"badhostname\"}]", + "Pattern: [{\"hostname\":\"bad\\thostname\"}] Inputs: [{\"hostname\":\"badhostname\"}]" ] } }, "urlpattern.https.any.js": { "fail": { "expected": [ - "Pattern: [{\"protocol\":\"http\",\"port\":\"80 \"}] Inputs: [{\"protocol\":\"http\",\"port\":\"80\"}]", "Pattern: [\"https://{sub.}?example{.com/}foo\"] Inputs: [\"https://example.com/foo\"]", - "Pattern: [{\"hostname\":\"bad#hostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad/hostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\\\\\\\hostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\nhostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\rhostname\"}] Inputs: undefined", - "Pattern: [{\"hostname\":\"bad\\thostname\"}] Inputs: undefined" + "Pattern: [{\"hostname\":\"bad#hostname\"}] Inputs: [{\"hostname\":\"bad\"}]", + "Pattern: [{\"hostname\":\"bad/hostname\"}] Inputs: [{\"hostname\":\"bad\"}]", + "Pattern: [{\"hostname\":\"bad\\\\\\\\hostname\"}] Inputs: [{\"hostname\":\"badhostname\"}]", + "Pattern: [{\"hostname\":\"bad\\nhostname\"}] Inputs: [{\"hostname\":\"badhostname\"}]", + "Pattern: [{\"hostname\":\"bad\\rhostname\"}] Inputs: [{\"hostname\":\"badhostname\"}]", + "Pattern: [{\"hostname\":\"bad\\thostname\"}] Inputs: 
[{\"hostname\":\"badhostname\"}]" ] } }
diff --git a/test/fixtures/wpt/README.md b/test/fixtures/wpt/README.md index e464af4932d4a1..a528d2035ccaf5 100644 --- a/test/fixtures/wpt/README.md +++ b/test/fixtures/wpt/README.md @@ -29,7 +29,7 @@ Last update: - resources: https://github.com/web-platform-tests/wpt/tree/1e140d63ec/resources - streams: https://github.com/web-platform-tests/wpt/tree/bc9dcbbf1a/streams - url: https://github.com/web-platform-tests/wpt/tree/a23788b77a/url -- urlpattern: https://github.com/web-platform-tests/wpt/tree/ef6d83d789/urlpattern +- urlpattern: https://github.com/web-platform-tests/wpt/tree/3b6b19853a/urlpattern - user-timing: https://github.com/web-platform-tests/wpt/tree/5ae85bf826/user-timing - wasm/jsapi: https://github.com/web-platform-tests/wpt/tree/cde25e7e3c/wasm/jsapi - wasm/webapi: https://github.com/web-platform-tests/wpt/tree/fd1b23eeaa/wasm/webapi diff --git a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json index 1d2ba25ff7d696..3d5ddce1eab21e 100644 --- a/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json +++ b/test/fixtures/wpt/urlpattern/resources/urlpatterntestdata.json @@ -1202,6 +1202,14 @@ "pattern": [{ "protocol": "http", "port": "80 " }], "inputs": [{ "protocol": "http", "port": "80" }], + "protocol": { "input": "http", "groups": {} } + }, + { + "pattern": [{ "protocol": "http", "port": "100000" }], + "inputs": [{ "protocol": "http", "port": "100000" }], "expected_obj": "error" @@ -2424,7 +2432,11 @@ "pattern": [{ "hostname": "bad#hostname" }], "pattern": [{ "hostname": "bad%hostname" }], @@ -2432,7 +2444,10 @@ "pattern": [{ "hostname": "bad/hostname" }], "pattern": [{ "hostname": "bad\\:hostname" }], @@ -2464,7 +2479,8 @@ "pattern": [{ "hostname": "bad\\\\hostname" }], + "expected_match": null "pattern": [{ "hostname": "bad^hostname" }], @@ -2476,15 +2492,24 @@ "pattern": [{ "hostname": "bad\nhostname" }], "pattern": [{ "hostname": "bad\rhostname" }], "pattern": 
[{ "hostname": "bad\thostname" }], "pattern": [{}], diff --git a/test/fixtures/wpt/versions.json b/test/fixtures/wpt/versions.json index 413aa54e8b8027..589996f58a81b0 100644 --- a/test/fixtures/wpt/versions.json +++ b/test/fixtures/wpt/versions.json @@ -76,7 +76,7 @@ "path": "url" "urlpattern": { - "commit": "ef6d83d789483763207af8cedcbf1f3c1317b981", + "commit": "3b6b19853a928ec9bfa28e9046c3cf601f160e42", "path": "urlpattern" "user-timing": { diff --git a/test/wpt/status/urlpattern.json b/test/wpt/status/urlpattern.json index 58877b85017280..6df26fd66a7d2b 100644 --- a/test/wpt/status/urlpattern.json +++ b/test/wpt/status/urlpattern.json @@ -11,28 +11,26 @@ "urlpattern.any.js": { "urlpattern.https.any.js": { }
[]
[]
{ "additions": 45, "author": "nodejs-github-bot", "deletions": 22, "html_url": "https://github.com/nodejs/node/pull/57377", "issue_id": 57377, "merged_at": "2025-03-10T18:23:54Z", "omission_probability": 0.1, "pr_number": 57377, "repo": "nodejs/node", "title": "test: update WPT for urlpattern to 3b6b19853a", "total_changes": 67 }
885
diff --git a/doc/api/cli.md b/doc/api/cli.md index 91ab8eb351cb43..c4d3afc71aa24c 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -219,15 +219,8 @@ The initializer module also needs to be allowed. Consider the following example: ```console $ node --experimental-permission t.js -node:internal/modules/cjs/loader:162 - const result = internalModuleStat(receiver, filename); - ^ Error: Access to this API has been restricted - at stat (node:internal/modules/cjs/loader:162:18) - at Module._findPath (node:internal/modules/cjs/loader:640:16) - at resolveMainPath (node:internal/modules/run_main:15:25) - at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:53:24) at node:internal/main/run_main_module:23:47 { code: 'ERR_ACCESS_DENIED', permission: 'FileSystemRead', @@ -300,18 +293,8 @@ new WASI({ ```console $ node --experimental-permission --allow-fs-read=* index.js -node:wasi:99 - const wrap = new _WASI(args, env, preopens, stdio); - ^ Error: Access to this API has been restricted - at new WASI (node:wasi:99:18) - at Object.<anonymous> (/home/index.js:3:1) - at Module._compile (node:internal/modules/cjs/loader:1476:14) - at Module._extensions..js (node:internal/modules/cjs/loader:1555:10) - at Module.load (node:internal/modules/cjs/loader:1288:32) - at Module._load (node:internal/modules/cjs/loader:1104:12) - at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:191:14) at node:internal/main/run_main_module:30:49 { code: 'ERR_ACCESS_DENIED', permission: 'WASI', @@ -341,18 +324,8 @@ new Worker(__filename); ```console $ node --experimental-permission --allow-fs-read=* index.js -node:internal/worker:188 - this[kHandle] = new WorkerImpl(url, - ^ Error: Access to this API has been restricted - at new Worker (node:internal/worker:188:21) - at Object.<anonymous> (/home/index.js.js:3:1) - at Module._compile (node:internal/modules/cjs/loader:1120:14) - at Module._extensions..js (node:internal/modules/cjs/loader:1174:10) - at 
Module.load (node:internal/modules/cjs/loader:998:32) - at Module._load (node:internal/modules/cjs/loader:839:12) - at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:81:12) at node:internal/main/run_main_module:17:47 { code: 'ERR_ACCESS_DENIED', permission: 'WorkerThreads' diff --git a/doc/api/permissions.md b/doc/api/permissions.md index e37c2982bc146a..a03285e28641e8 100644 --- a/doc/api/permissions.md +++ b/doc/api/permissions.md @@ -47,15 +47,8 @@ will be restricted. ```console $ node --experimental-permission index.js -node:internal/modules/cjs/loader:171 - const result = internalModuleStat(receiver, filename); - ^ Error: Access to this API has been restricted - at stat (node:internal/modules/cjs/loader:171:18) - at Module._findPath (node:internal/modules/cjs/loader:627:16) - at resolveMainPath (node:internal/modules/run_main:19:25) - at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:76:24) at node:internal/main/run_main_module:23:47 { code: 'ERR_ACCESS_DENIED', permission: 'FileSystemRead', diff --git a/lib/internal/modules/cjs/loader.js b/lib/internal/modules/cjs/loader.js index 3f161daa7f3c55..95763de0701581 100644 --- a/lib/internal/modules/cjs/loader.js +++ b/lib/internal/modules/cjs/loader.js @@ -160,7 +160,6 @@ const packageJsonReader = require('internal/modules/package_json_reader'); const { getOptionValue, getEmbedderOptions } = require('internal/options'); const shouldReportRequiredModules = getLazy(() => process.env.WATCH_REPORT_DEPENDENCIES); -const permission = require('internal/process/permission'); const { vm_dynamic_import_default_internal, } = internalBinding('symbols'); @@ -729,11 +728,8 @@ Module._findPath = function(request, paths, isMain) { // For each path for (let i = 0; i < paths.length; i++) { // Don't search further if path doesn't exist - // or doesn't have permission to it const curPath = paths[i]; - if (insidePath && curPath && - ((permission.isEnabled() && 
!permission.has('fs.read', curPath)) || _stat(curPath) < 1) - ) { + if (insidePath && curPath && _stat(curPath) < 1) { continue; } diff --git a/src/node_file.cc b/src/node_file.cc index dfdc25fd1d8986..5a50aacb1b939d 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -1037,6 +1037,8 @@ static void ExistsSync(const FunctionCallbackInfo<Value>& args) { // Used to speed up module loading. Returns 0 if the path refers to // a file, 1 when it's a directory or < 0 on error (usually -ENOENT.) // The speedup comes from not creating thousands of Stat and Error objects. +// Do not expose this function through public API as it doesn't hold +// Permission Model checks. static void InternalModuleStat(const FunctionCallbackInfo<Value>& args) { Environment* env = Environment::GetCurrent(args); @@ -1045,8 +1047,6 @@ static void InternalModuleStat(const FunctionCallbackInfo<Value>& args) { BufferValue path(env->isolate(), args[1]); CHECK_NOT_NULL(*path); ToNamespacedPath(env, &path); - THROW_IF_INSUFFICIENT_PERMISSIONS( - env, permission::PermissionScope::kFileSystemRead, path.ToStringView()); uv_fs_t req; int rc = uv_fs_stat(env->event_loop(), &req, *path, nullptr); @@ -1069,8 +1069,6 @@ static int32_t FastInternalModuleStat( HandleScope scope(env->isolate()); auto path = std::filesystem::path(input.data, input.data + input.length); - THROW_IF_INSUFFICIENT_PERMISSIONS( - env, permission::PermissionScope::kFileSystemRead, path.string(), -1); switch (std::filesystem::status(path).type()) { case std::filesystem::file_type::directory: diff --git a/test/fixtures/permission/fs-read.js b/test/fixtures/permission/fs-read.js index b189af295793e6..186117a6b768dd 100644 --- a/test/fixtures/permission/fs-read.js +++ b/test/fixtures/permission/fs-read.js @@ -282,6 +282,13 @@ const regularFile = __filename; permission: 'FileSystemRead', resource: path.toNamespacedPath(blockedFolder), })); + assert.throws(() => { + fs.readdirSync(blockedFolder, { recursive: true }); + }, 
common.expectsError({ + code: 'ERR_ACCESS_DENIED', + permission: 'FileSystemRead', + resource: path.toNamespacedPath(blockedFolder), + })); assert.throws(() => { fs.readdirSync(blockedFolder); }, common.expectsError({ diff --git a/test/fixtures/permission/main-module.js b/test/fixtures/permission/main-module.js new file mode 100644 index 00000000000000..cac52e04dddd24 --- /dev/null +++ b/test/fixtures/permission/main-module.js @@ -0,0 +1 @@ +require('./required-module'); \ No newline at end of file diff --git a/test/fixtures/permission/main-module.mjs b/test/fixtures/permission/main-module.mjs new file mode 100644 index 00000000000000..e7c28f7f6cab19 --- /dev/null +++ b/test/fixtures/permission/main-module.mjs @@ -0,0 +1 @@ +import './required-module.mjs'; \ No newline at end of file diff --git a/test/fixtures/permission/required-module.js b/test/fixtures/permission/required-module.js new file mode 100644 index 00000000000000..e8dbf442c5b1a2 --- /dev/null +++ b/test/fixtures/permission/required-module.js @@ -0,0 +1 @@ +console.log('ok'); \ No newline at end of file diff --git a/test/fixtures/permission/required-module.mjs b/test/fixtures/permission/required-module.mjs new file mode 100644 index 00000000000000..e8dbf442c5b1a2 --- /dev/null +++ b/test/fixtures/permission/required-module.mjs @@ -0,0 +1 @@ +console.log('ok'); \ No newline at end of file diff --git a/test/parallel/test-permission-fs-internal-module-stat.js b/test/parallel/test-permission-fs-internal-module-stat.js index 2f000f4814a0ca..f0b9d86f0809a8 100644 --- a/test/parallel/test-permission-fs-internal-module-stat.js +++ b/test/parallel/test-permission-fs-internal-module-stat.js @@ -9,8 +9,6 @@ if (!common.hasCrypto) { } const { internalBinding } = require('internal/test/binding'); -const assert = require('node:assert'); -const path = require('node:path'); const fixtures = require('../common/fixtures'); const blockedFile = fixtures.path('permission', 'deny', 'protected-file.md'); @@ -18,11 +16,7 @@ 
const internalFsBinding = internalBinding('fs'); // Run this inside a for loop to trigger the fast API for (let i = 0; i < 10_000; i++) { - assert.throws(() => { - internalFsBinding.internalModuleStat(internalFsBinding, blockedFile); - }, { - code: 'ERR_ACCESS_DENIED', - permission: 'FileSystemRead', - resource: path.toNamespacedPath(blockedFile), - }); + // internalModuleStat does not use permission model. + // doesNotThrow + internalFsBinding.internalModuleStat(internalFsBinding, blockedFile); } diff --git a/test/parallel/test-permission-fs-require.js b/test/parallel/test-permission-fs-require.js new file mode 100644 index 00000000000000..6a2e9201dac7b4 --- /dev/null +++ b/test/parallel/test-permission-fs-require.js @@ -0,0 +1,76 @@ +// Flags: --experimental-permission --allow-fs-read=* --allow-child-process +'use strict'; + +const common = require('../common'); +common.skipIfWorker(); +const fixtures = require('../common/fixtures'); + +const assert = require('node:assert'); +const { spawnSync } = require('node:child_process'); + +{ + const mainModule = fixtures.path('permission', 'main-module.js'); + const requiredModule = fixtures.path('permission', 'required-module.js'); + const { status, stdout, stderr } = spawnSync( + process.execPath, + [ + '--experimental-permission', + '--allow-fs-read', mainModule, + '--allow-fs-read', requiredModule, + mainModule, + ] + ); + + assert.strictEqual(status, 0, stderr.toString()); + assert.strictEqual(stdout.toString(), 'ok\n'); +} + +{ + // When required module is not passed as allowed path + const mainModule = fixtures.path('permission', 'main-module.js'); + const { status, stderr } = spawnSync( + process.execPath, + [ + '--experimental-permission', + '--allow-fs-read', mainModule, + mainModule, + ] + ); + + assert.strictEqual(status, 1, stderr.toString()); + assert.match(stderr.toString(), /Error: Access to this API has been restricted/); +} + +{ + // ESM loader test + const mainModule = fixtures.path('permission', 
'main-module.mjs'); + const requiredModule = fixtures.path('permission', 'required-module.mjs'); + const { status, stdout, stderr } = spawnSync( + process.execPath, + [ + '--experimental-permission', + '--allow-fs-read', mainModule, + '--allow-fs-read', requiredModule, + mainModule, + ] + ); + + assert.strictEqual(status, 0, stderr.toString()); + assert.strictEqual(stdout.toString(), 'ok\n'); +} + +{ + // When required module is not passed as allowed path (ESM) + const mainModule = fixtures.path('permission', 'main-module.mjs'); + const { status, stderr } = spawnSync( + process.execPath, + [ + '--experimental-permission', + '--allow-fs-read', mainModule, + mainModule, + ] + ); + + assert.strictEqual(status, 1, stderr.toString()); + assert.match(stderr.toString(), /Error: Access to this API has been restricted/); +}
diff --git a/doc/api/cli.md b/doc/api/cli.md index 91ab8eb351cb43..c4d3afc71aa24c 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -219,15 +219,8 @@ The initializer module also needs to be allowed. Consider the following example: $ node --experimental-permission t.js -node:internal/modules/cjs/loader:162 - at stat (node:internal/modules/cjs/loader:162:18) - at Module._findPath (node:internal/modules/cjs/loader:640:16) @@ -300,18 +293,8 @@ new WASI({ -node:wasi:99 - const wrap = new _WASI(args, env, preopens, stdio); - at new WASI (node:wasi:99:18) - at Object.<anonymous> (/home/index.js:3:1) - at Module._compile (node:internal/modules/cjs/loader:1476:14) - at Module._extensions..js (node:internal/modules/cjs/loader:1555:10) - at Module.load (node:internal/modules/cjs/loader:1288:32) - at Module._load (node:internal/modules/cjs/loader:1104:12) - at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:191:14) at node:internal/main/run_main_module:30:49 { permission: 'WASI', @@ -341,18 +324,8 @@ new Worker(__filename); -node:internal/worker:188 - this[kHandle] = new WorkerImpl(url, - at new Worker (node:internal/worker:188:21) - at Object.<anonymous> (/home/index.js.js:3:1) - at Module._compile (node:internal/modules/cjs/loader:1120:14) - at Module._extensions..js (node:internal/modules/cjs/loader:1174:10) - at Module.load (node:internal/modules/cjs/loader:998:32) - at Module._load (node:internal/modules/cjs/loader:839:12) - at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:81:12) at node:internal/main/run_main_module:17:47 { permission: 'WorkerThreads' diff --git a/doc/api/permissions.md b/doc/api/permissions.md index e37c2982bc146a..a03285e28641e8 100644 --- a/doc/api/permissions.md +++ b/doc/api/permissions.md @@ -47,15 +47,8 @@ will be restricted. 
$ node --experimental-permission index.js -node:internal/modules/cjs/loader:171 - at stat (node:internal/modules/cjs/loader:171:18) - at Module._findPath (node:internal/modules/cjs/loader:627:16) - at resolveMainPath (node:internal/modules/run_main:19:25) - at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:76:24) diff --git a/lib/internal/modules/cjs/loader.js b/lib/internal/modules/cjs/loader.js index 3f161daa7f3c55..95763de0701581 100644 --- a/lib/internal/modules/cjs/loader.js +++ b/lib/internal/modules/cjs/loader.js @@ -160,7 +160,6 @@ const packageJsonReader = require('internal/modules/package_json_reader'); const { getOptionValue, getEmbedderOptions } = require('internal/options'); const shouldReportRequiredModules = getLazy(() => process.env.WATCH_REPORT_DEPENDENCIES); -const permission = require('internal/process/permission'); const { vm_dynamic_import_default_internal, } = internalBinding('symbols'); @@ -729,11 +728,8 @@ Module._findPath = function(request, paths, isMain) { // For each path for (let i = 0; i < paths.length; i++) { // Don't search further if path doesn't exist - // or doesn't have permission to it const curPath = paths[i]; - if (insidePath && curPath && - ((permission.isEnabled() && !permission.has('fs.read', curPath)) || _stat(curPath) < 1) - ) { continue; } diff --git a/src/node_file.cc b/src/node_file.cc index dfdc25fd1d8986..5a50aacb1b939d 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -1037,6 +1037,8 @@ static void ExistsSync(const FunctionCallbackInfo<Value>& args) { // Used to speed up module loading. Returns 0 if the path refers to // a file, 1 when it's a directory or < 0 on error (usually -ENOENT.) // The speedup comes from not creating thousands of Stat and Error objects. +// Do not expose this function through public API as it doesn't hold +// Permission Model checks. 
static void InternalModuleStat(const FunctionCallbackInfo<Value>& args) { Environment* env = Environment::GetCurrent(args); @@ -1045,8 +1047,6 @@ static void InternalModuleStat(const FunctionCallbackInfo<Value>& args) { BufferValue path(env->isolate(), args[1]); CHECK_NOT_NULL(*path); ToNamespacedPath(env, &path); - env, permission::PermissionScope::kFileSystemRead, path.ToStringView()); uv_fs_t req; int rc = uv_fs_stat(env->event_loop(), &req, *path, nullptr); @@ -1069,8 +1069,6 @@ static int32_t FastInternalModuleStat( HandleScope scope(env->isolate()); auto path = std::filesystem::path(input.data, input.data + input.length); - env, permission::PermissionScope::kFileSystemRead, path.string(), -1); switch (std::filesystem::status(path).type()) { case std::filesystem::file_type::directory: diff --git a/test/fixtures/permission/fs-read.js b/test/fixtures/permission/fs-read.js index b189af295793e6..186117a6b768dd 100644 --- a/test/fixtures/permission/fs-read.js +++ b/test/fixtures/permission/fs-read.js @@ -282,6 +282,13 @@ const regularFile = __filename; permission: 'FileSystemRead', resource: path.toNamespacedPath(blockedFolder), })); + assert.throws(() => { + }, common.expectsError({ + code: 'ERR_ACCESS_DENIED', + permission: 'FileSystemRead', + })); assert.throws(() => { fs.readdirSync(blockedFolder); }, common.expectsError({ diff --git a/test/fixtures/permission/main-module.js b/test/fixtures/permission/main-module.js index 00000000000000..cac52e04dddd24 +++ b/test/fixtures/permission/main-module.js +require('./required-module'); diff --git a/test/fixtures/permission/main-module.mjs b/test/fixtures/permission/main-module.mjs index 00000000000000..e7c28f7f6cab19 +++ b/test/fixtures/permission/main-module.mjs +import './required-module.mjs'; diff --git a/test/fixtures/permission/required-module.js b/test/fixtures/permission/required-module.js +++ b/test/fixtures/permission/required-module.js diff --git a/test/fixtures/permission/required-module.mjs 
b/test/fixtures/permission/required-module.mjs +++ b/test/fixtures/permission/required-module.mjs diff --git a/test/parallel/test-permission-fs-internal-module-stat.js b/test/parallel/test-permission-fs-internal-module-stat.js index 2f000f4814a0ca..f0b9d86f0809a8 100644 --- a/test/parallel/test-permission-fs-internal-module-stat.js +++ b/test/parallel/test-permission-fs-internal-module-stat.js @@ -9,8 +9,6 @@ if (!common.hasCrypto) { const { internalBinding } = require('internal/test/binding'); -const path = require('node:path'); const fixtures = require('../common/fixtures'); const blockedFile = fixtures.path('permission', 'deny', 'protected-file.md'); @@ -18,11 +16,7 @@ const internalFsBinding = internalBinding('fs'); // Run this inside a for loop to trigger the fast API for (let i = 0; i < 10_000; i++) { - assert.throws(() => { - internalFsBinding.internalModuleStat(internalFsBinding, blockedFile); - }, { - resource: path.toNamespacedPath(blockedFile), - }); + // internalModuleStat does not use permission model. + // doesNotThrow + internalFsBinding.internalModuleStat(internalFsBinding, blockedFile); diff --git a/test/parallel/test-permission-fs-require.js b/test/parallel/test-permission-fs-require.js index 00000000000000..6a2e9201dac7b4 +++ b/test/parallel/test-permission-fs-require.js @@ -0,0 +1,76 @@ +// Flags: --experimental-permission --allow-fs-read=* --allow-child-process +'use strict'; +const common = require('../common'); +common.skipIfWorker(); +const fixtures = require('../common/fixtures'); +const assert = require('node:assert'); +const { spawnSync } = require('node:child_process'); + const requiredModule = fixtures.path('permission', 'required-module.js'); + // When required module is not passed as allowed path + // ESM loader test + const requiredModule = fixtures.path('permission', 'required-module.mjs'); + // When required module is not passed as allowed path (ESM)
[ "- at resolveMainPath (node:internal/modules/run_main:15:25)", "- at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:53:24)", "- ^", "+ if (insidePath && curPath && _stat(curPath) < 1) {", "+ fs.readdirSync(blockedFolder, { recursive: true });", "+ resource: path.toNamespacedPath(blockedFolder),", "-const assert = require('node:assert');", "- code: 'ERR_ACCESS_DENIED',", "- permission: 'FileSystemRead'," ]
[ 15, 16, 45, 99, 143, 147, 192, 204, 205 ]
{ "additions": 93, "author": "RafaelGSS", "deletions": 52, "html_url": "https://github.com/nodejs/node/pull/55797", "issue_id": 55797, "merged_at": "2024-11-11T17:31:44Z", "omission_probability": 0.1, "pr_number": 55797, "repo": "nodejs/node", "title": "lib,permission: ignore internalModuleStat on module loading", "total_changes": 145 }
886
diff --git a/lib/internal/assert/assertion_error.js b/lib/internal/assert/assertion_error.js index ee376899d04d35..dd56811647e5bf 100644 --- a/lib/internal/assert/assertion_error.js +++ b/lib/internal/assert/assertion_error.js @@ -134,7 +134,14 @@ function getStackedDiff(actual, expected) { } function getSimpleDiff(originalActual, actual, originalExpected, expected) { - const stringsLen = actual.length + expected.length; + let stringsLen = actual.length + expected.length; + // Accounting for the quotes wrapping strings + if (typeof originalActual === 'string') { + stringsLen -= 2; + } + if (typeof originalExpected === 'string') { + stringsLen -= 2; + } if (stringsLen <= kMaxShortStringLength && (originalActual !== 0 || originalExpected !== 0)) { return { message: `${actual} !== ${expected}`, header: '' }; } diff --git a/test/parallel/test-assert-deep.js b/test/parallel/test-assert-deep.js index aeacd21a21f029..93cc248160e6a1 100644 --- a/test/parallel/test-assert-deep.js +++ b/test/parallel/test-assert-deep.js @@ -846,6 +846,26 @@ test('Additional tests', () => { } ); + assert.throws( + () => assert.strictEqual('apple', 'pear'), + { + name: 'AssertionError', + message: 'Expected values to be strictly equal:\n\n\'apple\' !== \'pear\'\n' + } + ); + + assert.throws( + () => assert.strictEqual('ABABABABABAB', 'BABABABABABA'), + { + name: 'AssertionError', + message: 'Expected values to be strictly equal:\n' + + '+ actual - expected\n' + + '\n' + + "+ 'ABABABABABAB'\n" + + "- 'BABABABABABA'\n" + } + ); + assert.notDeepStrictEqual(new Date(), new Date(2000, 3, 14)); assert.throws( diff --git a/test/parallel/test-assert.js b/test/parallel/test-assert.js index 4a6420779a845e..e639e7f150a5df 100644 --- a/test/parallel/test-assert.js +++ b/test/parallel/test-assert.js @@ -919,11 +919,7 @@ test('Additional asserts', () => { { code: 'ERR_ASSERTION', constructor: assert.AssertionError, - message: 'Expected values to be strictly equal:\n' + - '+ actual - expected\n' + - '\n' + - 
"+ 'string'\n" + - '- false\n' + message: 'Expected values to be strictly equal:\n\n\'string\' !== false\n' } ); @@ -935,11 +931,7 @@ test('Additional asserts', () => { { code: 'ERR_ASSERTION', constructor: assert.AssertionError, - message: 'Expected values to be strictly equal:\n' + - '+ actual - expected\n' + - '\n' + - "+ 'string'\n" + - '- false\n' + message: 'Expected values to be strictly equal:\n\n\'string\' !== false\n' } ); @@ -951,11 +943,7 @@ test('Additional asserts', () => { }, { code: 'ERR_ASSERTION', constructor: assert.AssertionError, - message: 'Expected values to be strictly equal:\n' + - '+ actual - expected\n' + - '\n' + - "+ 'string'\n" + - '- false\n' + message: 'Expected values to be strictly equal:\n\n\'string\' !== false\n' } ); /* eslint-enable @stylistic/js/indent */
diff --git a/lib/internal/assert/assertion_error.js b/lib/internal/assert/assertion_error.js index ee376899d04d35..dd56811647e5bf 100644 --- a/lib/internal/assert/assertion_error.js +++ b/lib/internal/assert/assertion_error.js @@ -134,7 +134,14 @@ function getStackedDiff(actual, expected) { } function getSimpleDiff(originalActual, actual, originalExpected, expected) { - const stringsLen = actual.length + expected.length; + let stringsLen = actual.length + expected.length; + // Accounting for the quotes wrapping strings + if (typeof originalExpected === 'string') { if (stringsLen <= kMaxShortStringLength && (originalActual !== 0 || originalExpected !== 0)) { return { message: `${actual} !== ${expected}`, header: '' }; } diff --git a/test/parallel/test-assert-deep.js b/test/parallel/test-assert-deep.js index aeacd21a21f029..93cc248160e6a1 100644 --- a/test/parallel/test-assert-deep.js +++ b/test/parallel/test-assert-deep.js @@ -846,6 +846,26 @@ test('Additional tests', () => { + () => assert.strictEqual('apple', 'pear'), + message: 'Expected values to be strictly equal:\n\n\'apple\' !== \'pear\'\n' + () => assert.strictEqual('ABABABABABAB', 'BABABABABABA'), + message: 'Expected values to be strictly equal:\n' + + '+ actual - expected\n' + + '\n' + + "+ 'ABABABABABAB'\n" + + "- 'BABABABABABA'\n" assert.notDeepStrictEqual(new Date(), new Date(2000, 3, 14)); assert.throws( diff --git a/test/parallel/test-assert.js b/test/parallel/test-assert.js index 4a6420779a845e..e639e7f150a5df 100644 --- a/test/parallel/test-assert.js +++ b/test/parallel/test-assert.js @@ -919,11 +919,7 @@ test('Additional asserts', () => { @@ -935,11 +931,7 @@ test('Additional asserts', () => { @@ -951,11 +943,7 @@ test('Additional asserts', () => { }, { code: 'ERR_ASSERTION', constructor: assert.AssertionError, - message: 'Expected values to be strictly equal:\n' + - '+ actual - expected\n' + - '\n' + - "+ 'string'\n" + + message: 'Expected values to be strictly equal:\n\n\'string\' !== false\n' 
/* eslint-enable @stylistic/js/indent */
[ "+ if (typeof originalActual === 'string') {", "- '- false\\n'" ]
[ 11, 89 ]
{ "additions": 31, "author": "puskin94", "deletions": 16, "html_url": "https://github.com/nodejs/node/pull/55474", "issue_id": 55474, "merged_at": "2024-11-02T15:12:13Z", "omission_probability": 0.1, "pr_number": 55474, "repo": "nodejs/node", "title": "assert: fix the string length check for printing the simple diff", "total_changes": 47 }