File tree: 2 files changed (+25 −2 lines changed)
lines changed Original file line number Diff line number Diff line change @@ -694,11 +694,18 @@ def _get_model_response(
694694 f"index: { self .model_backend .current_model_index } " ,
695695 exc_info = exc ,
696696 )
697- if not response :
697+ error_info = str (exc )
698+
699+ if not response and self .model_backend .num_models > 1 :
698700 raise ModelProcessingError (
699701 "Unable to process messages: none of the provided models "
700702 "run succesfully."
701703 )
704+ elif not response :
705+ raise ModelProcessingError (
706+ f"Unable to process messages: the only provided model "
707+ f"did not run succesfully. Error: { error_info } "
708+ )
702709
703710 logger .info (
704711 f"Model { self .model_backend .model_type } , "
@@ -732,11 +739,18 @@ async def _aget_model_response(
732739 f"index: { self .model_backend .current_model_index } " ,
733740 exc_info = exc ,
734741 )
735- if not response :
742+ error_info = str (exc )
743+
744+ if not response and self .model_backend .num_models > 1 :
736745 raise ModelProcessingError (
737746 "Unable to process messages: none of the provided models "
738747 "run succesfully."
739748 )
749+ elif not response :
750+ raise ModelProcessingError (
751+ f"Unable to process messages: the only provided model "
752+ f"did not run succesfully. Error: { error_info } "
753+ )
740754
741755 logger .info (
742756 f"Model { self .model_backend .model_type } , "
Original file line number Diff line number Diff line change @@ -117,6 +117,15 @@ def current_model_index(self) -> int:
117117 """
118118 return self .models .index (self .current_model )
119119
120+ @property
121+ def num_models (self ) -> int :
122+ r"""Return the number of models in the manager.
123+
124+ Returns:
125+ int: The number of models available in the model manager.
126+ """
127+ return len (self .models )
128+
120129 @property
121130 def token_limit (self ):
122131 r"""Returns the maximum token limit for current model.
You can’t perform that action at this time.
0 commit comments