
Commit e605f43

minor exception handling
Signed-off-by: Keval Mahajan <mahajankeval23@gmail.com>
1 parent a988166 commit e605f43

File tree

2 files changed: +14 -10 lines changed


mcpgateway/toolops/toolops_altk_service.py

Lines changed: 11 additions & 9 deletions
@@ -52,18 +52,19 @@
 from mcpgateway.services.tool_service import ToolService
 from mcpgateway.toolops.utils.db_util import populate_testcases_table, query_testcases_table, query_tool_auth
 from mcpgateway.toolops.utils.format_conversion import convert_to_toolops_spec, post_process_nl_test_cases
-from mcpgateway.toolops.utils.llm_util import chat_llm_instance, get_llm_instance
+from mcpgateway.toolops.utils.llm_util import get_llm_instance

-# importing toolops modules from ALTK
+logging_service = LoggingService()
+logger = logging_service.get_logger(__name__)


 toolops_llm_provider = os.getenv("LLM_PROVIDER")
 toolops_llm, toolops_llm_provider_config = get_llm_instance()
 if toolops_llm is not None and toolops_llm_provider_config is not None:
-    toolops_llm_config = LLMConfig(provider=toolops_llm_provider, config=toolops_llm_provider_config)
-
-logging_service = LoggingService()
-logger = logging_service.get_logger(__name__)
+    TOOLOPS_LLM_CONFIG = LLMConfig(provider=toolops_llm_provider, config=toolops_llm_provider_config)
+else:
+    logger.error("Error in obtaining LLM instance for Toolops services")
+    TOOLOPS_LLM_CONFIG = None

 LLM_MODEL_ID = os.getenv("OPENAI_MODEL", "")
 provider = os.getenv("OPENAI_BASE_URL", "")
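The hunk above is the core of the exception-handling change: the logger is created before the LLM lookup, and instead of constructing LLMConfig unconditionally, the module now falls back to TOOLOPS_LLM_CONFIG = None and logs an error when get_llm_instance() returns nothing, so importing the module no longer requires a configured provider. A minimal, self-contained sketch of that guarded module-level initialization pattern, with a stub get_llm_instance and a plain dict standing in for LLMConfig (both are illustrative, not the repository code):

import logging
import os

logger = logging.getLogger(__name__)


def get_llm_instance():
    # Illustrative stand-in for mcpgateway.toolops.utils.llm_util.get_llm_instance:
    # returns (llm, provider_config), or (None, None) when no provider is configured.
    if not os.getenv("LLM_PROVIDER"):
        return None, None
    return object(), {"model": os.getenv("OPENAI_MODEL", "")}


toolops_llm, toolops_llm_provider_config = get_llm_instance()
if toolops_llm is not None and toolops_llm_provider_config is not None:
    TOOLOPS_LLM_CONFIG = {"provider": os.getenv("LLM_PROVIDER"), "config": toolops_llm_provider_config}
else:
    # Import still succeeds; downstream callers must handle the None config.
    logger.error("Error in obtaining LLM instance for Toolops services")
    TOOLOPS_LLM_CONFIG = None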
@@ -102,6 +103,7 @@ def custom_mcp_cf_execute_prompt(prompt, client=None, gen_mode=None, parameters=
     # To suppress pylint errors creating dummy altk params and asserting
     altk_dummy_params = {"client": client, "gen_mode": gen_mode, "parameters": parameters, "max_new_tokens": max_new_tokens, "stop_sequences": stop_sequences}
     assert altk_dummy_params is not None
+    chat_llm_instance, _ = get_llm_instance(model_type="chat")
     llm_response = chat_llm_instance.invoke(prompt)
     response = llm_response.content
     return response
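The single added line above fetches the chat model inside custom_mcp_cf_execute_prompt instead of relying on the chat_llm_instance that was previously imported at module load (and removed from the imports in the first hunk). A hedged sketch of that lazy-lookup shape; the None check is an assumption added for illustration and is not part of the diff:

def chat_prompt(prompt):
    # Resolve the chat model at call time so module import works even when
    # no LLM provider is configured.
    chat_llm, _ = get_llm_instance(model_type="chat")
    if chat_llm is None:
        raise RuntimeError("No chat LLM instance available for Toolops services")
    llm_response = chat_llm.invoke(prompt)
    return llm_response.content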
@@ -203,11 +205,11 @@ async def execute_tool_nl_test_cases(tool_id, tool_nl_test_cases, tool_service:
     tool_auth = query_tool_auth(tool_id, db)
     # handling transport based on protocol type
     if "/mcp" in tool_url:
-        config = MCPClientConfig(mcp_server=MCPServerConfig(url=tool_url, transport="streamable_http", headers=tool_auth), llm=toolops_llm_config)
+        config = MCPClientConfig(mcp_server=MCPServerConfig(url=tool_url, transport="streamable_http", headers=tool_auth), llm=TOOLOPS_LLM_CONFIG)
     elif "/sse" in tool_url:
-        config = MCPClientConfig(mcp_server=MCPServerConfig(url=tool_url, transport="sse", headers=tool_auth), llm=toolops_llm_config)
+        config = MCPClientConfig(mcp_server=MCPServerConfig(url=tool_url, transport="sse", headers=tool_auth), llm=TOOLOPS_LLM_CONFIG)
     else:
-        config = MCPClientConfig(mcp_server=MCPServerConfig(url=tool_url, transport="stdio", headers=tool_auth), llm=toolops_llm_config)
+        config = MCPClientConfig(mcp_server=MCPServerConfig(url=tool_url, transport="stdio", headers=tool_auth), llm=TOOLOPS_LLM_CONFIG)

     service = MCPChatService(config)
     await service.initialize()
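The three branches above differ only in the transport inferred from the tool URL, and all of them now pass the module-level TOOLOPS_LLM_CONFIG, which may be None if initialization failed. A small illustrative helper (not present in the diff) that captures the same URL-to-transport rule:

def pick_transport(tool_url):
    # Mirrors the branching in execute_tool_nl_test_cases: streamable HTTP for
    # /mcp endpoints, SSE for /sse endpoints, stdio otherwise.
    if "/mcp" in tool_url:
        return "streamable_http"
    if "/sse" in tool_url:
        return "sse"
    return "stdio"


# e.g. pick_transport("https://gateway.example/servers/1/mcp") == "streamable_http"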

mcpgateway/toolops/utils/llm_util.py

Lines changed: 3 additions & 1 deletion
@@ -245,8 +245,8 @@ def execute_prompt(prompt):
     """
     try:
         logger.info("Inferencing OpenAI provider LLM with the given prompt")
+
         completion_llm_instance, _ = get_llm_instance(model_type="completion")
-        chat_llm_instance, _ = get_llm_instance(model_type="chat")
         llm_response = completion_llm_instance.invoke(prompt, stop=["\n\n", "<|endoftext|>", "###STOP###"])
         response = llm_response.replace("<|eom_id|>", "").strip()
         # logger.info("Successful - Inferencing OpenAI provider LLM")
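Here execute_prompt drops the chat-model lookup it never used, so the completion path fetches only the model it actually invokes. A compact sketch of the resulting flow, with the docstring and surrounding try/except omitted (the helper name and stop sequences come from the diff; everything else is illustrative):

def execute_prompt(prompt):
    # Only the completion model is needed on this path; the unused chat
    # instance lookup is gone.
    completion_llm, _ = get_llm_instance(model_type="completion")
    llm_response = completion_llm.invoke(prompt, stop=["\n\n", "<|endoftext|>", "###STOP###"])
    return llm_response.replace("<|eom_id|>", "").strip()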
@@ -257,6 +257,8 @@ def execute_prompt(prompt):


 # if __name__ == "__main__":
+#     chat_llm_instance, _ = get_llm_instance(model_type="chat")
+#     completion_llm_instance, _ = get_llm_instance(model_type="completion")
 #     prompt = "what is India capital city?"
 #     print("Prompt : ", prompt)
 #     print("Text completion output : ")

0 commit comments
