@@ -66,16 +66,16 @@ public struct GenerationConfig: Sendable {
66
66
/// > of variation is still possible.
67
67
///
68
68
/// > Important: The range of supported temperature values depends on the model; see the
69
- /// > [Cloud documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig)
69
+ /// > [documentation](https://firebase.google.com/docs/vertex-ai/model-parameters?platform=ios#temperature)
70
70
/// > for more details.
71
71
/// - topP: Controls diversity of generated text. Higher values (e.g., 0.9) produce more diverse
72
72
/// text, while lower values (e.g., 0.5) make the output more focused.
73
73
///
74
74
/// The supported range is 0.0 to 1.0.
75
75
///
76
76
/// > Important: The default `topP` value depends on the model; see the
77
- /// [Cloud documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig)
78
- /// for more details.
77
+ /// > [documentation](https://firebase.google.com/docs/vertex-ai/model-parameters?platform=ios#top-p)
78
+ /// > for more details.
79
79
/// - topK: Limits the number of highest probability words the model considers when generating
80
80
/// text. For example, a topK of 40 means only the 40 most likely words are considered for the
81
81
/// next token. A higher value increases diversity, while a lower value makes the output more
@@ -84,7 +84,7 @@ public struct GenerationConfig: Sendable {
84
84
/// The supported range is 1 to 40.
85
85
///
86
86
/// > Important: Support for `topK` and the default value depends on the model; see the
87
- /// [Cloud documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#generationconfig)
87
+ /// [documentation](https://firebase.google.com/docs/vertex-ai/model-parameters?platform=ios#top-k)
88
88
/// for more details.
89
89
/// - candidateCount: The number of response variations to return; defaults to 1 if not set.
90
90
/// Support for multiple candidates depends on the model; see the
@@ -137,8 +137,9 @@ public struct GenerationConfig: Sendable {
137
137
/// - `application/json`: Schema for JSON response.
138
138
///
139
139
/// Refer to the
140
- /// [Control generated output](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output)
141
- /// guide for more details.
140
+ /// [Generate structured
141
+ /// output](https://firebase.google.com/docs/vertex-ai/structured-output?platform=ios) guide
142
+ /// for more details.
142
143
public init ( temperature: Float ? = nil , topP: Float ? = nil , topK: Int ? = nil ,
143
144
candidateCount: Int ? = nil , maxOutputTokens: Int ? = nil ,
144
145
presencePenalty: Float ? = nil , frequencyPenalty: Float ? = nil ,
0 commit comments