Skip to content

Commit 272388c

Browse files
committed
temp remove failed doctest due to env
Signed-off-by: Keval Mahajan <mahajankeval23@gmail.com>
1 parent 4a4f4cd commit 272388c

File tree

1 file changed

+0
-60
lines changed

1 file changed

+0
-60
lines changed

mcpgateway/toolops/utils/llm_util.py

Lines changed: 0 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -51,66 +51,6 @@ def get_llm_instance(model_type="completion"):
5151
Returns:
5252
llm_instance : LLM model instance used for inferencing the prompts/user inputs
5353
llm_config: LLM provider configuration provided in the environment variables
54-
55-
Examples:
56-
>>> import os
57-
>>> from unittest.mock import patch, MagicMock
58-
>>> # Setup: Define the global variable used in the function for the test context
59-
>>> global TOOLOPS_TEMPERATURE
60-
>>> TOOLOPS_TEMPERATURE = 0.7
61-
62-
>>> # Case 1: OpenAI Provider Configuration
63-
>>> # We patch os.environ to simulate specific provider settings
64-
>>> env_vars = {
65-
... "LLM_PROVIDER": "openai",
66-
... "OPENAI_API_KEY": "sk-mock-key",
67-
... "OPENAI_BASE_URL": "https://api.openai.com",
68-
... "OPENAI_MODEL": "gpt-4"
69-
... }
70-
>>> with patch.dict(os.environ, env_vars):
71-
... # Assuming OpenAIProvider and OpenAIConfig are available in the module scope
72-
... # We simulate the function call. Note: This tests the Config creation logic.
73-
... llm_instance, llm_config = get_llm_instance("completion")
74-
... llm_config.__class__.__name__
75-
'OpenAIConfig'
76-
77-
>>> # Case 2: Azure OpenAI Provider Configuration
78-
>>> env_vars = {
79-
... "LLM_PROVIDER": "azure_openai",
80-
... "AZURE_OPENAI_API_KEY": "az-mock-key",
81-
... "AZURE_OPENAI_ENDPOINT": "https://mock.azure.com",
82-
... "AZURE_OPENAI_MODEL": "gpt-35-turbo"
83-
... }
84-
>>> with patch.dict(os.environ, env_vars):
85-
... llm_instance, llm_config = get_llm_instance("chat")
86-
... llm_config.__class__.__name__
87-
'AzureOpenAIConfig'
88-
89-
>>> # Case 3: AWS Bedrock Provider Configuration
90-
>>> env_vars = {
91-
... "LLM_PROVIDER": "aws_bedrock",
92-
... "AWS_BEDROCK_MODEL_ID": "anthropic.claude-v2",
93-
... "AWS_BEDROCK_REGION": "us-east-1",
94-
... "AWS_ACCESS_KEY_ID": "mock-access",
95-
... "AWS_SECRET_ACCESS_KEY": "mock-secret"
96-
... }
97-
>>> with patch.dict(os.environ, env_vars):
98-
... llm_instance, llm_config = get_llm_instance("chat")
99-
... llm_config.__class__.__name__
100-
'AWSBedrockConfig'
101-
102-
>>> # Case 4: WatsonX Provider Configuration
103-
>>> env_vars = {
104-
... "LLM_PROVIDER": "watsonx",
105-
... "WATSONX_APIKEY": "wx-mock-key",
106-
... "WATSONX_URL": "https://us-south.ml.cloud.ibm.com",
107-
... "WATSONX_PROJECT_ID": "mock-project-id",
108-
... "WATSONX_MODEL_ID": "ibm/granite-13b"
109-
... }
110-
>>> with patch.dict(os.environ, env_vars):
111-
... llm_instance, llm_config = get_llm_instance("completion")
112-
... llm_config.__class__.__name__
113-
'WatsonxConfig'
11454
"""
11555
llm_provider = os.getenv("LLM_PROVIDER", "")
11656
llm_instance, llm_config = None, None

0 commit comments

Comments
 (0)