Skip to content

Instantly share code, notes, and snippets.

@devoncarew
Created September 29, 2025 15:45
Show Gist options
  • Save devoncarew/d6fb1b676456242c10b71d24343e1828 to your computer and use it in GitHub Desktop.
Code snippets for generating server streaming SSE clients
# Auxiliary method info.
sse-streaming-methods = [
'generativelanguage.models.streamGenerateContent',
'generativelanguage.tunedModels.streamGenerateContent'
]
class ApiClient {
...
/// Issues a POST to [url] and yields each server-sent-events `data:` payload,
/// decoded from JSON via [decoder].
///
/// When [body] is non-null it is JSON-encoded into the request body and a
/// JSON content-type header is sent. Throws if the server responds with an
/// error status (see `_throwIfError`).
Stream<T> postStreaming<T>(
  Uri url,
  Decoder<T> decoder, [
  JsonEncodable? body,
]) async* {
  // SSE data lines are prefixed with exactly this marker.
  const dataPrefix = 'data: ';

  // Build the POST request against the SSE-enabled endpoint.
  final request = http.Request('POST', _addStreaming(url));
  request.headers.addAll({
    _clientKey: _clientName,
    ...?additionalHeaders,
    if (body != null) _contentTypeKey: _typeJson,
  });
  if (body != null) {
    request.body = jsonEncode(body.toJson());
  }

  // Send the request; surface any non-success response as an error.
  final response = await client.send(request);
  await _throwIfError(response);

  // Decode the response body as text, split into individual SSE lines, and
  // emit one decoded value per `data:` line (other SSE lines are ignored).
  final sseLines =
      response.stream.toStringStream().transform(const LineSplitter());
  await for (final sseLine in sseLines) {
    if (sseLine.startsWith(dataPrefix)) {
      final payload = sseLine.substring(dataPrefix.length);
      yield decoder(jsonDecode(payload) as Map<String, Object?>);
    }
  }
}
/// Returns [url] with an `alt=sse` query parameter added, asking the server
/// to reply with a server-sent-events stream; existing parameters are kept.
static Uri _addStreaming(Uri url) {
  return url.replace(
    queryParameters: {...url.queryParameters, 'alt': 'sse'},
  );
}
...
}
...
/// The generated class for the `models` resource.
class ModelsResource {
  ModelsResource._(this._client)
      : operations = ModelsOperationsResource._(_client);

  final ApiClient _client;

  /// Access the `operations` methods and resources.
  final ModelsOperationsResource operations;

  /// Generates multiple embedding vectors from the input `Content` which
  /// consists of a batch of strings represented as `EmbedContentRequest`
  /// objects.
  ///
  /// [model] Required. The model's resource name. This serves as an ID for the
  /// Model to use. This name should match a model name returned by the
  /// `ListModels` method. Format: `models/{model}`
  Future<BatchEmbedContentsResponse> batchEmbedContents(
    BatchEmbedContentsRequest request, {
    required String model,
  }) async {
    return _client.post(
      Uri.https(_authority, 'v1/$model:batchEmbedContents'),
      BatchEmbedContentsResponse.fromJson,
      request,
    );
  }

  /// Runs a model's tokenizer on input `Content` and returns the token count.
  ///
  /// Refer to the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens)
  /// to learn more about tokens.
  ///
  /// [model] Required. The model's resource name. This serves as an ID for the
  /// Model to use. This name should match a model name returned by the
  /// `ListModels` method. Format: `models/{model}`
  Future<CountTokensResponse> countTokens(
    CountTokensRequest request, {
    required String model,
  }) async {
    return _client.post(
      Uri.https(_authority, 'v1/$model:countTokens'),
      CountTokensResponse.fromJson,
      request,
    );
  }

  /// Generates a [streamed
  /// response](https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream)
  /// from the model given an input `GenerateContentRequest`.
  ///
  /// [model] Required. The name of the `Model` to use for generating the
  /// completion. Format: `models/{model}`.
  Stream<GenerateContentResponse> streamGenerateContent(
    GenerateContentRequest request, {
    required String model,
  }) {
    return _client.postStreaming(
      Uri.https(_authority, 'v1/$model:streamGenerateContent'),
      GenerateContentResponse.fromJson,
      request,
    );
  }

  /// Gets information about a specific `Model` such as its version number,
  /// token limits,
  /// [parameters](https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters)
  /// and other metadata.
  ///
  /// Refer to the [Gemini models
  /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) for detailed
  /// model information.
  ///
  /// [name] Required. The resource name of the model. This name should match a
  /// model name returned by the `ListModels` method. Format: `models/{model}`
  Future<Model> get({required String name}) async {
    return _client.get(Uri.https(_authority, 'v1/$name'), Model.fromJson);
  }

  /// Generates a model response given an input `GenerateContentRequest`.
  ///
  /// Refer to the [text generation
  /// guide](https://ai.google.dev/gemini-api/docs/text-generation) for detailed
  /// usage information. Input capabilities differ between models, including
  /// tuned models. Refer to the [model
  /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning
  /// guide](https://ai.google.dev/gemini-api/docs/model-tuning) for details.
  ///
  /// [model] Required. The name of the `Model` to use for generating the
  /// completion. Format: `models/{model}`.
  Future<GenerateContentResponse> generateContent(
    GenerateContentRequest request, {
    required String model,
  }) async {
    return _client.post(
      Uri.https(_authority, 'v1/$model:generateContent'),
      GenerateContentResponse.fromJson,
      request,
    );
  }

  /// Lists the [`Model`s](https://ai.google.dev/gemini-api/docs/models/gemini)
  /// available through the Gemini API.
  ///
  /// [pageToken] A page token, received from a previous `ListModels` call.
  /// Provide the `page_token` returned by one request as an argument to the
  /// next request to retrieve the next page. When paginating, all other
  /// parameters provided to `ListModels` must match the call that provided the
  /// page token.
  ///
  /// [pageSize] The maximum number of `Models` to return (per page). If
  /// unspecified, 50 models will be returned per page. This method returns at
  /// most 1000 models per page, even if you pass a larger page_size.
  Future<ListModelsResponse> list({String? pageToken, int? pageSize}) async {
    // Only include pagination parameters the caller actually supplied.
    final queryParameters = {
      if (pageToken != null) 'pageToken': pageToken,
      if (pageSize != null) 'pageSize': '$pageSize',
    };
    return _client.get(
      Uri.https(_authority, 'v1/models', queryParameters),
      ListModelsResponse.fromJson,
    );
  }

  /// Generates a text embedding vector from the input `Content` using the
  /// specified [Gemini Embedding
  /// model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding).
  ///
  /// [model] Required. The model's resource name. This serves as an ID for the
  /// Model to use. This name should match a model name returned by the
  /// `ListModels` method. Format: `models/{model}`
  Future<EmbedContentResponse> embedContent(
    EmbedContentRequest request, {
    required String model,
  }) async {
    return _client.post(
      Uri.https(_authority, 'v1/$model:embedContent'),
      EmbedContentResponse.fromJson,
      request,
    );
  }
}
...
// Code generator that turns API method descriptions into Dart client methods.
// NOTE(review): this snippet is elided (`...`) — the full class is not visible.
class Generator {
...
// Emits one generated Dart method for an API method description: doc comment,
// deprecation annotation, parameters, and a body delegating to ApiClient.
void buildMethodCall(...) {
...
// Streaming methods are generated as `Stream<...>` and call the client's
// `postStreaming` variant; unary methods are `Future<...>` and marked async.
final returnType = method.isStreaming ? 'Stream' : 'Future';
// Suffix appended to the HTTP verb to pick the streaming client method,
// e.g. `post` -> `postStreaming`.
final streaming = method.isStreaming ? 'Streaming' : '';
parent.methods.add(
buildMethod(
name: method.dartName,
// Streaming bodies are sync (they return the stream directly); unary
// bodies are async.
modifier: method.isStreaming ? null : MethodModifier.async,
returns: refer('$returnType<$responseType>'),
// Wrap the description to fit the file width after the `/// ` prefix.
docs: wrap(desc, fileWidth - 6).map((line) => '/// $line'),
annotations: method.deprecated ? [refer('deprecated')] : [],
requiredParameters: requestParam == null ? null : [requestParam],
optionalParameters: method.parameters.map((p) {
// Optional named parameters get a nullable Dart type.
final opt = p.required ? '' : '?';
return buildParam(
name: p.dartName,
named: true,
required: p.required,
type: refer('${p.fieldType.dartName}$opt'),
);
}),
// Generated body: build the request URL, then delegate to the shared
// client's (possibly streaming) HTTP method.
body: Code('''
final url = Uri.https(_authority, '$servicePath$path'$query);
return _client.$httpMethod$streaming(url, $responseBuilder$bodyParam);
'''),
),
);
...
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment