61 | 61 | nvp.sendRequestFcn |
62 | 62 | end |
63 | 63 |
64 | | -URL = endpoint + "openai/deployments/" + deploymentID + "/chat/completions?api-version=" + nvp.APIVersion; |
| 64 | +URL = matlab.net.URI(endpoint); |
| 65 | +URL.Path = [URL.Path, "openai", "deployments", deploymentID, "chat", "completions"]; |
| 66 | +URL.Query = matlab.net.QueryParameter("api-version", nvp.APIVersion); |
65 | 67 |
66 | 68 | parameters = buildParametersCall(messages, functions, nvp); |
67 | 69 |
68 | | -[response, streamedText] = nvp.sendRequestFcn(parameters,nvp.APIKey, URL, nvp.TimeOut, nvp.StreamFun); |
| 70 | +[response, streamedText] = nvp.sendRequestFcn(parameters,nvp.APIKey, URL.EncodedURI, nvp.TimeOut, nvp.StreamFun); |
69 | 71 |
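For context, a minimal sketch of what the matlab.net.URI construction above produces; the endpoint, deployment, and API version values here are placeholders for illustration, not values taken from this change.

% Sketch only: placeholder endpoint, deployment, and API version.
endpoint = "https://my-resource.openai.azure.com";
deploymentID = "my-deployment";
apiVersion = "2024-02-01";

URL = matlab.net.URI(endpoint);
URL.Path = [URL.Path, "openai", "deployments", deploymentID, "chat", "completions"];
URL.Query = matlab.net.QueryParameter("api-version", apiVersion);

% EncodedURI is the full, percent-encoded URL string handed to sendRequestFcn, e.g.
% https://my-resource.openai.azure.com/openai/deployments/my-deployment/chat/completions?api-version=2024-02-01
disp(URL.EncodedURI)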
70 | 72 | % For old models like GPT-3.5, we may have to change the request sent a |
71 | 73 | % little. Since we cannot detect the model used other than trying to send a |
75 | 77 | isfield(response.Body.Data.error,"message") && ... |
76 | 78 | response.Body.Data.error.message == "Unrecognized request argument supplied: max_completion_tokens" |
77 | 79 | parameters = renameStructField(parameters,'max_completion_tokens','max_tokens'); |
78 | | - [response, streamedText] = nvp.sendRequestFcn(parameters,nvp.APIKey, URL, nvp.TimeOut, nvp.StreamFun); |
| 80 | + [response, streamedText] = nvp.sendRequestFcn(parameters,nvp.APIKey, URL.EncodedURI, nvp.TimeOut, nvp.StreamFun); |
79 | 81 | end |
80 | 82 |
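renameStructField is a repository helper whose implementation is not part of this diff; an assumed equivalent, shown only to make the fallback above self-contained, is:

% Assumed equivalent of the repo's renameStructField helper (not shown in
% this diff): move the value to the new field name and drop the old one,
% leaving the struct unchanged if the old field is absent.
function s = renameStructField(s, oldName, newName)
    if isfield(s, oldName)
        s.(newName) = s.(oldName);
        s = rmfield(s, oldName);
    end
end

With that, the retry above re-sends the same request body, with max_tokens in place of max_completion_tokens, for models that reject the newer field name.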
81 | 83 | % If call errors, "choices" will not be part of response.Body.Data, instead |
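As a rough, hedged illustration of the branching that the comment above describes (the exact handling in this file lies outside the visible diff): a successful call returns a choices array in response.Body.Data, while a failed call returns an error field instead.

% Hedged sketch; not necessarily how this file handles the response.
if isfield(response.Body.Data, "choices")
    text = response.Body.Data.choices(1).message.content;
else
    error("%s", response.Body.Data.error.message);
end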