
Commit a988166

handle configuration failure edge cases
Signed-off-by: Keval Mahajan <mahajankeval23@gmail.com>
1 parent 272388c commit a988166

File tree

2 files changed: +66, -5 lines


mcpgateway/toolops/toolops_altk_service.py

Lines changed: 2 additions & 1 deletion
@@ -59,7 +59,8 @@
 
 toolops_llm_provider = os.getenv("LLM_PROVIDER")
 toolops_llm, toolops_llm_provider_config = get_llm_instance()
-toolops_llm_config = LLMConfig(provider=toolops_llm_provider, config=toolops_llm_provider_config)
+if toolops_llm is not None and toolops_llm_provider_config is not None:
+    toolops_llm_config = LLMConfig(provider=toolops_llm_provider, config=toolops_llm_provider_config)
 
 logging_service = LoggingService()
 logger = logging_service.get_logger(__name__)
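With this change, toolops_llm_config is only built when both the LLM instance and the provider configuration are available. A minimal, self-contained sketch of that guard pattern follows; it uses stub names and a plain dict in place of LLMConfig, so it is illustrative rather than the module's real API:

import os

def get_llm_instance_stub():
    # Stand-in for get_llm_instance(): (None, None) signals a configuration failure.
    provider = os.getenv("LLM_PROVIDER")
    if not provider:
        return None, None
    return object(), {"provider": provider}

toolops_llm, toolops_llm_provider_config = get_llm_instance_stub()

toolops_llm_config = None  # explicit default keeps the name bound even when configuration fails
if toolops_llm is not None and toolops_llm_provider_config is not None:
    toolops_llm_config = {"provider": os.getenv("LLM_PROVIDER"), "config": toolops_llm_provider_config}

print("LLM configured" if toolops_llm_config else "LLM not configured; toolops setup skipped")

Note that the commit itself does not bind a default; presumably toolops_llm_config is only referenced later when the provider is actually configured.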

mcpgateway/toolops/utils/llm_util.py

Lines changed: 64 additions & 4 deletions
@@ -51,6 +51,66 @@ def get_llm_instance(model_type="completion"):
     Returns:
         llm_instance : LLM model instance used for inferencing the prompts/user inputs
         llm_config: LLM provider configuration provided in the environment variables
+
+    Examples:
+        >>> import os
+        >>> from unittest.mock import patch, MagicMock
+        >>> # Setup: Define the global variable used in the function for the test context
+        >>> global TOOLOPS_TEMPERATURE
+        >>> TOOLOPS_TEMPERATURE = 0.7
+
+        >>> # Case 1: OpenAI Provider Configuration
+        >>> # We patch os.environ to simulate specific provider settings
+        >>> env_vars = {
+        ...     "LLM_PROVIDER": "openai",
+        ...     "OPENAI_API_KEY": "sk-mock-key",
+        ...     "OPENAI_BASE_URL": "https://api.openai.com",
+        ...     "OPENAI_MODEL": "gpt-4"
+        ... }
+        >>> with patch.dict(os.environ, env_vars):
+        ...     # Assuming OpenAIProvider and OpenAIConfig are available in the module scope
+        ...     # We simulate the function call. Note: This tests the Config creation logic.
+        ...     llm_instance, llm_config = get_llm_instance("completion")
+        ...     llm_config.__class__.__name__
+        'OpenAIConfig'
+
+        >>> # Case 2: Azure OpenAI Provider Configuration
+        >>> env_vars = {
+        ...     "LLM_PROVIDER": "azure_openai",
+        ...     "AZURE_OPENAI_API_KEY": "az-mock-key",
+        ...     "AZURE_OPENAI_ENDPOINT": "https://mock.azure.com",
+        ...     "AZURE_OPENAI_MODEL": "gpt-35-turbo"
+        ... }
+        >>> with patch.dict(os.environ, env_vars):
+        ...     llm_instance, llm_config = get_llm_instance("chat")
+        ...     llm_config.__class__.__name__
+        'AzureOpenAIConfig'
+
+        >>> # Case 3: AWS Bedrock Provider Configuration
+        >>> env_vars = {
+        ...     "LLM_PROVIDER": "aws_bedrock",
+        ...     "AWS_BEDROCK_MODEL_ID": "anthropic.claude-v2",
+        ...     "AWS_BEDROCK_REGION": "us-east-1",
+        ...     "AWS_ACCESS_KEY_ID": "mock-access",
+        ...     "AWS_SECRET_ACCESS_KEY": "mock-secret"
+        ... }
+        >>> with patch.dict(os.environ, env_vars):
+        ...     llm_instance, llm_config = get_llm_instance("chat")
+        ...     llm_config.__class__.__name__
+        'AWSBedrockConfig'
+
+        >>> # Case 4: WatsonX Provider Configuration
+        >>> env_vars = {
+        ...     "LLM_PROVIDER": "watsonx",
+        ...     "WATSONX_APIKEY": "wx-mock-key",
+        ...     "WATSONX_URL": "https://us-south.ml.cloud.ibm.com",
+        ...     "WATSONX_PROJECT_ID": "mock-project-id",
+        ...     "WATSONX_MODEL_ID": "ibm/granite-13b"
+        ... }
+        >>> with patch.dict(os.environ, env_vars):
+        ...     llm_instance, llm_config = get_llm_instance("completion")
+        ...     llm_config.__class__.__name__
+        'WatsonxConfig'
     """
     llm_provider = os.getenv("LLM_PROVIDER", "")
     llm_instance, llm_config = None, None
@@ -162,6 +222,8 @@ def get_llm_instance(model_type="completion"):
             max_new_tokens=wx_max_tokens,
             decoding_method=wx_decoding_method,
         )
+    else:
+        return None, None
 
     llm_service = provider_class(llm_config)
     llm_instance = llm_service.get_llm(model_type=model_type)
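The new else branch makes an unset or unrecognised LLM_PROVIDER non-fatal: get_llm_instance() now returns (None, None) instead of continuing to provider_class(llm_config) with nothing resolved. A hedged, illustrative check of that behaviour, assuming mcpgateway.toolops.utils.llm_util imports cleanly and that an unknown provider does fall through to the new branch:

import os
from unittest.mock import patch

from mcpgateway.toolops.utils.llm_util import get_llm_instance

# Override only LLM_PROVIDER; patch.dict leaves other variables untouched.
with patch.dict(os.environ, {"LLM_PROVIDER": "not-a-real-provider"}):
    llm_instance, llm_config = get_llm_instance("completion")

assert llm_instance is None and llm_config is None  # configuration failure is reported, not raised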
@@ -171,10 +233,6 @@ def get_llm_instance(model_type="completion"):
     return llm_instance, llm_config
 
 
-completion_llm_instance, _ = get_llm_instance(model_type="completion")
-chat_llm_instance, _ = get_llm_instance(model_type="chat")
-
-
 def execute_prompt(prompt):
     """
     Method for LLM inferencing using a prompt/user input
@@ -187,6 +245,8 @@ def execute_prompt(prompt):
     """
     try:
         logger.info("Inferencing OpenAI provider LLM with the given prompt")
+        completion_llm_instance, _ = get_llm_instance(model_type="completion")
+        chat_llm_instance, _ = get_llm_instance(model_type="chat")
         llm_response = completion_llm_instance.invoke(prompt, stop=["\n\n", "<|endoftext|>", "###STOP###"])
         response = llm_response.replace("<|eom_id|>", "").strip()
         # logger.info("Successful - Inferencing OpenAI provider LLM")
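Together with the deletion in the previous hunk, this moves the completion_llm_instance / chat_llm_instance creation from module import time into execute_prompt, so the instances are built per call and a missing provider configuration presumably no longer breaks the import of llm_util. A rough, self-contained sketch of that lazy pattern, using stub names rather than the module's real API:

import os

def get_llm_instance_stub(model_type="completion"):
    # Stand-in: (None, None) signals a configuration failure instead of raising at import time.
    provider = os.getenv("LLM_PROVIDER", "")
    return (f"<{provider} {model_type} llm>", {"provider": provider}) if provider else (None, None)

def execute_prompt_sketch(prompt):
    # Instances are created inside the call, not when the module is imported.
    completion_llm, _ = get_llm_instance_stub("completion")
    if completion_llm is None:
        raise RuntimeError("LLM provider is not configured")
    return f"{completion_llm} would be invoked with: {prompt}"

os.environ.setdefault("LLM_PROVIDER", "openai")  # illustrative value so the sketch runs end to end
print(execute_prompt_sketch("hello"))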
