You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
- _AuthenticationType_: specifies whether the key is an actual API Key or an [Azure Active Directory token](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/managed-identity) (optional, default: "ApiKey").
Copy file name to clipboardExpand all lines: src/ChatGptNet/Models/ChatGptRequest.cs
+20-14Lines changed: 20 additions & 14 deletions
Original file line number
Diff line number
Diff line change
@@ -3,7 +3,7 @@
3
3
namespace ChatGptNet.Models;
4
4
5
5
/// <summary>
6
-
/// Represents a request for a chat completions.
6
+
/// Represents a request for chat completions.
7
7
/// </summary>
8
8
/// <remarks>
9
9
/// See <see href="https://platform.openai.com/docs/api-reference/chat/create">Create chat completion (OpenAI)</see> or <see href="https://learn.microsoft.com/azure/cognitive-services/openai/reference#chat-completions">Chat Completions (Azure)</see> for more information.
@@ -32,7 +32,7 @@ internal class ChatGptRequest
32
32
/// Controls which (if any) function is called by the model.
/// <description>Model will not call a function and instead generates a message.</description>
@@ -46,7 +46,7 @@ internal class ChatGptRequest
46
46
/// <description>Specifying a particular function name forces the model to call that function.</description>
47
47
/// </item>
48
48
/// </list>
49
-
/// <see cref="ChatGptToolChoices.None"/> is the default when no functions are present. <see cref="ChatGptToolChoices.None"/> is the default if functions are present.
49
+
/// <para><see cref="ChatGptToolChoices.None"/> is the default when no functions are present. <see cref="ChatGptToolChoices.Auto"/> is the default if functions are present.</para>
50
50
/// </remarks>
51
51
/// <seealso cref="ChatGptFunction"/>
52
52
[JsonPropertyName("function_call")]
@@ -64,7 +64,7 @@ internal class ChatGptRequest
64
64
/// Controls which (if any) function is called by the model.
/// <description>Model will not call a function and instead generates a message.</description>
@@ -78,17 +78,23 @@ internal class ChatGptRequest
78
78
/// <description>Specifying a particular function name forces the model to call that function.</description>
79
79
/// </item>
80
80
/// </list>
81
-
/// <see cref="ChatGptToolChoices.None"/> is the default when no functions are present. <see cref="ChatGptToolChoices.None"/> is the default if functions are present.
81
+
/// <para><see cref="ChatGptToolChoices.None"/> is the default when no functions are present. <see cref="ChatGptToolChoices.Auto"/> is the default if functions are present.</para>
82
82
/// </remarks>
83
83
/// <seealso cref="ChatGptFunction"/>
84
84
[JsonPropertyName("tool_choice")]
85
85
public object? ToolChoice { get; set; }
86
86
87
87
/// <summary>
88
-
/// Gets or sets a value that specify if response will be sent in streaming as partial message deltas.
88
+
/// Gets or sets a value that specifies if the response will be sent in streaming as partial message deltas.
89
89
/// </summary>
90
90
public bool Stream { get; set; }
91
91
92
+
/// <summary>
93
+
/// Gets or sets the stream options for chat completions.
/// Gets or sets a value such that, if specified, the system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result.
94
100
/// </summary>
@@ -99,10 +105,10 @@ internal class ChatGptRequest
99
105
public int? Seed { get; set; }
100
106
101
107
/// <summary>
102
-
/// Gets or sets what sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic (default: 1).
108
+
/// Gets or sets the sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic (default: 1).
103
109
/// </summary>
104
110
/// <remarks>
105
-
/// It is generally recommend altering this value or <see cref="TopP"/> but not both.
111
+
/// It is generally recommended to alter this value or <see cref="TopP"/> but not both.
106
112
/// </remarks>
107
113
/// <seealso cref="TopP"/>
108
114
public double? Temperature { get; set; }
@@ -111,7 +117,7 @@ internal class ChatGptRequest
111
117
/// Gets or sets an alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with <see cref="TopP"/> probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered (default: 1).
112
118
/// </summary>
113
119
/// <remarks>
114
-
/// It is generally recommend altering this value or <see cref="Temperature"/> but not both.
120
+
/// It is generally recommended to alter this value or <see cref="Temperature"/> but not both.
115
121
/// </remarks>
116
122
/// <seealso cref="Temperature"/>
117
123
[JsonPropertyName("top_p")]
@@ -124,7 +130,7 @@ internal class ChatGptRequest
124
130
public int? MaxTokens { get; set; }
125
131
126
132
/// <summary>
127
-
/// Gets or sets the presence penalties for chat completion. Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics (default: 0).
133
+
/// Gets or sets the presence penalties for chat completion. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics (default: 0).
128
134
/// </summary>
129
135
/// <remarks>
130
136
/// See <see href="https://platform.openai.com/docs/api-reference/parameter-details">Parameter details</see> for more information.
@@ -133,7 +139,7 @@ internal class ChatGptRequest
133
139
public double? PresencePenalty { get; set; }
134
140
135
141
/// <summary>
136
-
/// Gets or sets the frequency penalties for chat completion. Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim (default: 0).
142
+
/// Gets or sets the frequency penalties for chat completion. A number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim (default: 0).
137
143
/// </summary>
138
144
/// <remarks>
139
145
/// See <see href="https://platform.openai.com/docs/api-reference/parameter-details">Parameter details</see> for more information.
/// Gets or set a value that determines whether to return log probabilities of the output tokens or not. If <see langword="true"/>, returns the log probabilities of each output token returned in the content of message (default: <see langword="false"/>).
158
+
/// Gets or sets a value that determines whether to return log probabilities of the output tokens or not. If <see langword="true"/>, returns the log probabilities of each output token returned in the content of the message (default: <see langword="false"/>).
153
159
/// </summary>
154
160
/// <seealso cref="TopLogProbabilities"/>
155
161
[JsonPropertyName("logprobs")]
@@ -159,7 +165,7 @@ internal class ChatGptRequest
159
165
/// Gets or sets a value between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability.
160
166
/// </summary>
161
167
/// <remarks>
162
-
/// <see cref="LogProbabilities"/> must be set to <see langword="true"/> if this parameter is used.
168
+
/// <see cref="LogProbabilities"/> must be set to <see langword="true"/> if this parameter is used.
163
169
/// </remarks>
164
170
/// <seealso cref="LogProbabilities"/>
165
171
[JsonPropertyName("top_logprobs")]
@@ -172,4 +178,4 @@ internal class ChatGptRequest
172
178
/// See <see href="https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids">Safety best practices</see> for more information.
0 commit comments