03-load_exception.diff 1.5 KB

```diff
diff --git a/src/llama.cpp b/src/llama.cpp
index 73f52435..58a00fb1 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -7241,7 +7241,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         }
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
-        return -1;
+        throw;
     }
 
     return 0;
@@ -17564,16 +17564,23 @@ struct llama_model * llama_load_model_from_file(
         }
         model->rpc_servers.push_back(servers);
     }
-    int status = llama_model_load(path_model, *model, params);
-    GGML_ASSERT(status <= 0);
-    if (status < 0) {
-        if (status == -1) {
-            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
-        } else if (status == -2) {
-            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+
+    try {
+        int status = llama_model_load(path_model, *model, params);
+        GGML_ASSERT(status <= 0);
+        if (status < 0) {
+            if (status == -1) {
+                LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+            } else if (status == -2) {
+                LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+            }
+            delete model;
+            return nullptr;
         }
+    } catch (...) {
+        LLAMA_LOG_ERROR("%s: exception loading model\n", __func__);
         delete model;
-        return nullptr;
+        throw;
     }
 
     return model;
```
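For context, here is a minimal C++ caller sketch (not part of the patch) showing what the change enables: with the rethrow in place, `llama_load_model_from_file()` propagates the loader's exception instead of only returning `nullptr`, so the caller can report `err.what()` itself. The function names (`llama_model_default_params`, `llama_load_model_from_file`, `llama_free_model`) come from `llama.h`; the snippet assumes a build against the patched tree.

```cpp
// Hypothetical caller, assuming a build against the patched llama.cpp.
#include <cstdio>
#include <exception>

#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_model_params params = llama_model_default_params();

    try {
        // With the patch, loader errors escape as exceptions (rethrown after
        // the "exception loading model" log and the `delete model` cleanup).
        llama_model * model = llama_load_model_from_file(argv[1], params);
        if (model == nullptr) {
            // Non-throwing failure paths (e.g. a cancelled load, status == -2)
            // still come back as nullptr.
            fprintf(stderr, "load failed\n");
            return 1;
        }
        llama_free_model(model);
    } catch (const std::exception & err) {
        // Originates from llama_model_load(), which now rethrows instead of
        // returning -1.
        fprintf(stderr, "load threw: %s\n", err.what());
        return 1;
    }

    return 0;
}
```

The `catch (...)` / `throw;` pair inside the library keeps the existing cleanup (`delete model`) while leaving the error type and message intact for the caller.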