
Commit 5200776

fix indentations changed by renaming TopProbabilityMass to TopP
1 parent 6108d0f commit 5200776

File tree: 3 files changed (azureChat.m, ollamaChat.m, openAIChat.m), +11 −12 lines changed.

Note: per the commit message, each −/+ pair below differs only in leading whitespace (alignment that the TopProbabilityMass → TopP rename had broken), so most changed pairs read identically in this view; the only other change re-wraps one comment in ollamaChat.m onto a single line.

azureChat.m

Lines changed: 3 additions & 3 deletions

@@ -19,7 +19,7 @@
 %                     reduce it. Setting Temperature=0 removes
 %                     randomness from the output altogether.
 %
-%   TopP            - Top probability mass value for controlling the
+%   TopP            - Top probability mass value for controlling the
 %                     diversity of the output. Default value is 1;
 %                     lower values imply that only the more likely
 %                     words can appear in any particular place.
@@ -62,7 +62,7 @@
 %   azureChat Properties:
 %       Temperature   - Temperature of generation.
 %
-%       TopP          - Top probability mass to consider for generation.
+%       TopP          - Top probability mass to consider for generation.
 %
 %       StopSequences - Sequences to stop the generation of tokens.
 %
@@ -99,7 +99,7 @@
     nvp.Tools (1,:) {mustBeA(nvp.Tools, "openAIFunction")} = openAIFunction.empty
     nvp.APIVersion (1,1) {mustBeAPIVersion} = "2024-02-01"
     nvp.Temperature {llms.utils.mustBeValidTemperature} = 1
-    nvp.TopP {llms.utils.mustBeValidTopP} = 1
+    nvp.TopP {llms.utils.mustBeValidTopP} = 1
     nvp.StopSequences {llms.utils.mustBeValidStop} = {}
     nvp.ResponseFormat (1,1) string {mustBeMember(nvp.ResponseFormat,["text","json"])} = "text"
     nvp.APIKey {mustBeNonzeroLengthTextScalar}
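
For orientation, a minimal usage sketch of the renamed option follows. It is not part of this commit: the system prompt text and sampling values are illustrative, and the Azure endpoint, deployment, and key are assumed to be supplied through the environment.

% Hypothetical usage of the renamed TopP option (formerly
% TopProbabilityMass). Endpoint/deployment/key configuration is
% assumed to come from environment variables, not from this commit.
chat = azureChat("You are a helpful assistant", ...
    APIVersion="2024-02-01", ...  % default per the arguments block above
    Temperature=0.7, ...          % lower values reduce randomness
    TopP=0.9);                    % sample only from the top 90% of probability mass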

ollamaChat.m

Lines changed: 5 additions & 6 deletions

@@ -17,13 +17,13 @@
 %                     values reduce it. Setting Temperature=0 removes
 %                     randomness from the output altogether.
 %
-%   TopP            - Top probability mass value for controlling the
+%   TopP            - Top probability mass value for controlling the
 %                     diversity of the output. Default value is 1;
 %                     lower values imply that only the more likely
 %                     words can appear in any particular place.
 %                     This is also known as top-p sampling.
 %
-%   TopK            - Maximum number of most likely tokens that are
+%   TopK            - Maximum number of most likely tokens that are
 %                     considered for output. Default is Inf, allowing
 %                     all tokens. Smaller values reduce diversity in
 %                     the output.
@@ -34,8 +34,7 @@
 %                     tail-free sampling. Lower values reduce
 %                     diversity, with some authors recommending
 %                     values around 0.95. Tail-free sampling is
-%                     slower than using TopP or
-%                     TopK.
+%                     slower than using TopP or TopK.
 %
 %   StopSequences   - Vector of strings that when encountered, will
 %                     stop the generation of tokens. Default
@@ -81,8 +80,8 @@
     modelName {mustBeTextScalar}
     systemPrompt {llms.utils.mustBeTextOrEmpty} = []
     nvp.Temperature {llms.utils.mustBeValidTemperature} = 1
-    nvp.TopP {llms.utils.mustBeValidTopP} = 1
-    nvp.TopK (1,1) {mustBeReal,mustBePositive} = Inf
+    nvp.TopP {llms.utils.mustBeValidTopP} = 1
+    nvp.TopK (1,1) {mustBeReal,mustBePositive} = Inf
     nvp.StopSequences {llms.utils.mustBeValidStop} = {}
     nvp.ResponseFormat (1,1) string {mustBeMember(nvp.ResponseFormat,["text","json"])} = "text"
     nvp.TimeOut (1,1) {mustBeReal,mustBePositive} = 120
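
A similar sketch for the Ollama constructor, exercising both realigned options. The model name "mistral" and all parameter values are illustrative assumptions; only the option names and defaults come from the arguments block above.

% Hypothetical usage: modelName is the required first argument per the
% arguments block above; "mistral" is an illustrative choice.
chat = ollamaChat("mistral", ...
    Temperature=0.8, ...
    TopP=0.95, ...    % top-p (nucleus) sampling threshold
    TopK=40, ...      % consider only the 40 most likely tokens (default Inf)
    TimeOut=120);     % default per the arguments block above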

openAIChat.m

Lines changed: 3 additions & 3 deletions

@@ -20,7 +20,7 @@
 %                     reduce it. Setting Temperature=0 removes
 %                     randomness from the output altogether.
 %
-%   TopP            - Top probability mass value for controlling the
+%   TopP            - Top probability mass value for controlling the
 %                     diversity of the output. Default value is 1;
 %                     lower values imply that only the more likely
 %                     words can appear in any particular place.
@@ -59,7 +59,7 @@
 %
 %   Temperature     - Temperature of generation.
 %
-%   TopP            - Top probability mass to consider for generation.
+%   TopP            - Top probability mass to consider for generation.
 %
 %   StopSequences   - Sequences to stop the generation of tokens.
 %
@@ -94,7 +94,7 @@
     nvp.Tools (1,:) {mustBeA(nvp.Tools, "openAIFunction")} = openAIFunction.empty
     nvp.ModelName (1,1) string {mustBeModel} = "gpt-3.5-turbo"
     nvp.Temperature {llms.utils.mustBeValidTemperature} = 1
-    nvp.TopP {llms.utils.mustBeValidTopP} = 1
+    nvp.TopP {llms.utils.mustBeValidTopP} = 1
     nvp.StopSequences {llms.utils.mustBeValidStop} = {}
     nvp.ResponseFormat (1,1) string {mustBeMember(nvp.ResponseFormat,["text","json"])} = "text"
     nvp.APIKey {mustBeNonzeroLengthTextScalar}
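
Finally, a sketch for the OpenAI constructor. The system prompt is illustrative, and the key is assumed to live in an OPENAI_API_KEY environment variable rather than anything this commit defines.

% Hypothetical usage of the renamed TopP option with openAIChat.
% The API key is read from the environment to avoid hard-coding it.
chat = openAIChat("You are a helpful assistant", ...
    ModelName="gpt-3.5-turbo", ...    % default per the arguments block above
    TopP=0.5, ...                     % only tokens in the top half of the probability mass
    APIKey=getenv("OPENAI_API_KEY"));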
