Merge pull request #974 from microsoft/isidorn/resulting-whale
Update to latest languageModel API
isidorn authored Feb 28, 2024
2 parents 9d7f51b + b69d6d7 commit 47c58ff
Showing 3 changed files with 122 additions and 72 deletions.
chat-sample/package.json (3 changes: 1 addition & 2 deletions)
@@ -15,8 +15,7 @@
"github.copilot-chat"
],
"categories": [
"AI",
"Chat"
"Other"
],
"activationEvents": [
"onStartupFinished"
chat-sample/src/extension.ts (28 changes: 12 additions & 16 deletions)
@@ -20,13 +20,12 @@ export function activate(context: vscode.ExtensionContext) {
// The GitHub Copilot Chat extension implements this provider.
if (request.command == 'teach') {
stream.progress('Picking the right topic to teach...');
- const access = await vscode.lm.requestLanguageModelAccess(LANGUAGE_MODEL_ID);
const topic = getTopic(context.history);
const messages = [
- new vscode.LanguageModelSystemMessage('You are a cat! Your job is to explain computer science concepts in the funny manner of a cat. Always start your response by stating what concept you are explaining. Always include code samples.'),
- new vscode.LanguageModelUserMessage(topic)
+ new vscode.LanguageModelChatSystemMessage('You are a cat! Your job is to explain computer science concepts in the funny manner of a cat. Always start your response by stating what concept you are explaining. Always include code samples.'),
+ new vscode.LanguageModelChatUserMessage(topic)
];
- const chatRequest = access.makeChatRequest(messages, {}, token);
+ const chatRequest = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, token);
for await (const fragment of chatRequest.stream) {
stream.markdown(fragment);
}
@@ -39,24 +38,22 @@ export function activate(context: vscode.ExtensionContext) {
return { metadata: { command: 'teach' } };
} else if (request.command == 'play') {
stream.progress('Throwing away the computer science books and preparing to play with some Python code...');
- const access = await vscode.lm.requestLanguageModelAccess(LANGUAGE_MODEL_ID);
const messages = [
- new vscode.LanguageModelSystemMessage('You are a cat! Reply in the voice of a cat, using cat analogies when appropriate. Be concise to prepare for cat play time.'),
- new vscode.LanguageModelUserMessage('Give a small random python code samples (that have cat names for variables). ' + request.prompt)
+ new vscode.LanguageModelChatSystemMessage('You are a cat! Reply in the voice of a cat, using cat analogies when appropriate. Be concise to prepare for cat play time.'),
+ new vscode.LanguageModelChatUserMessage('Give a small random python code samples (that have cat names for variables). ' + request.prompt)
];
- const chatRequest = access.makeChatRequest(messages, {}, token);
+ const chatRequest = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, token);
for await (const fragment of chatRequest.stream) {
stream.markdown(fragment);
}
return { metadata: { command: 'play' } };
} else {
- const access = await vscode.lm.requestLanguageModelAccess(LANGUAGE_MODEL_ID);
const messages = [
- new vscode.LanguageModelSystemMessage(`You are a cat! Think carefully and step by step like a cat would.
+ new vscode.LanguageModelChatSystemMessage(`You are a cat! Think carefully and step by step like a cat would.
Your job is to explain computer science concepts in the funny manner of a cat, using cat metaphors. Always start your response by stating what concept you are explaining. Always include code samples.`),
- new vscode.LanguageModelUserMessage(request.prompt)
+ new vscode.LanguageModelChatUserMessage(request.prompt)
];
- const chatRequest = access.makeChatRequest(messages, {}, token);
+ const chatRequest = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, token);
for await (const fragment of chatRequest.stream) {
// Process the output from the language model
// Replace all python function definitions with cat sounds to make the user stop looking at the code and start playing with the cat
@@ -122,13 +119,12 @@ export function activate(context: vscode.ExtensionContext) {
vscode.commands.registerTextEditorCommand(CAT_NAMES_COMMAND_ID, async (textEditor: vscode.TextEditor) => {
// Replace all variables in active editor with cat names and words
const text = textEditor.document.getText();
- const access = await vscode.lm.requestLanguageModelAccess(LANGUAGE_MODEL_ID);
const messages = [
- new vscode.LanguageModelSystemMessage(`You are a cat! Think carefully and step by step like a cat would.
+ new vscode.LanguageModelChatSystemMessage(`You are a cat! Think carefully and step by step like a cat would.
Your job is to replace all variable names in the following code with funny cat variable names. Be creative. IMPORTANT respond just with code. Do not use markdown!`),
- new vscode.LanguageModelUserMessage(text)
+ new vscode.LanguageModelChatUserMessage(text)
];
- const chatRequest = access.makeChatRequest(messages, {}, new vscode.CancellationTokenSource().token);
+ const chatRequest = await vscode.lm.sendChatRequest(LANGUAGE_MODEL_ID, messages, {}, new vscode.CancellationTokenSource().token);

// Clear the editor content before inserting new content
await textEditor.edit(edit => {
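Taken together, the edits in extension.ts replace the old two-step flow, vscode.lm.requestLanguageModelAccess followed by access.makeChatRequest, with a single call to vscode.lm.sendChatRequest, and rename the message classes to LanguageModelChatSystemMessage and LanguageModelChatUserMessage. A minimal sketch of the new pattern, assuming a placeholder model id and a caller that already has an object with a markdown method (standing in for the chat response stream used above) plus a CancellationToken; none of these names are defined by this commit:

import * as vscode from 'vscode';

// Placeholder id for illustration only; the sample keeps its own LANGUAGE_MODEL_ID constant.
const MODEL_ID = 'placeholder-language-model-id';

async function streamCatReply(
    prompt: string,
    output: { markdown(value: string): void },
    token: vscode.CancellationToken
): Promise<void> {
    const messages = [
        new vscode.LanguageModelChatSystemMessage('You are a cat! Reply using cat metaphors.'),
        new vscode.LanguageModelChatUserMessage(prompt)
    ];
    // A single call now covers the access check and issues the request.
    const chatRequest = await vscode.lm.sendChatRequest(MODEL_ID, messages, {}, token);
    // The response is still consumed as a stream of text fragments.
    for await (const fragment of chatRequest.stream) {
        output.markdown(fragment);
    }
}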
chat-sample/src/vscode.proposed.languageModels.d.ts (163 changes: 109 additions & 54 deletions)
@@ -8,20 +8,14 @@ declare module 'vscode' {
/**
* Represents a language model response.
*
- * @see {@link LanguageModelAccess.makeChatRequest}
+ * @see {@link LanguageModelAccess.chatRequest}
*/
- export interface LanguageModelResponse {
-
- /**
- * The overall result of the request which represents failure or success
- * but. The concrete value is not specified and depends on the selected language model.
- *
- * *Note* that the actual response represented by the {@link LanguageModelResponse.stream `stream`}-property
- */
- result: Thenable<unknown>;
+ export interface LanguageModelChatResponse {

/**
* An async iterable that is a stream of text chunks forming the overall response.
+ *
+ * *Note* that this stream will error when during receiving an error occurs.
*/
stream: AsyncIterable<string>;
}
@@ -35,7 +29,7 @@ declare module 'vscode' {
* *Note* that a language model may choose to add additional system messages to the ones
* provided by extensions.
*/
- export class LanguageModelSystemMessage {
+ export class LanguageModelChatSystemMessage {

/**
* The content of this message.
@@ -53,7 +47,7 @@ declare module 'vscode' {
/**
* A language model message that represents a user message.
*/
- export class LanguageModelUserMessage {
+ export class LanguageModelChatUserMessage {

/**
* The content of this message.
@@ -78,7 +72,7 @@ declare module 'vscode' {
* A language model message that represents an assistant message, usually in response to a user message
* or as a sample response/reply-pair.
*/
- export class LanguageModelAssistantMessage {
+ export class LanguageModelChatAssistantMessage {

/**
* The content of this message.
@@ -93,63 +87,76 @@ declare module 'vscode' {
constructor(content: string);
}

- export type LanguageModelMessage = LanguageModelSystemMessage | LanguageModelUserMessage | LanguageModelAssistantMessage;
+ export type LanguageModelChatMessage = LanguageModelChatSystemMessage | LanguageModelChatUserMessage | LanguageModelChatAssistantMessage;


/**
- * Represents access to using a language model. Access can be revoked at any time and extension
- * must check if the access is {@link LanguageModelAccess.isRevoked still valid} before using it.
+ * An event describing the change in the set of available language models.
*/
- export interface LanguageModelAccess {
-
+ export interface LanguageModelChangeEvent {
/**
- * Whether the access to the language model has been revoked.
+ * Added language models.
*/
+ readonly added: readonly string[];
+ /**
+ * Removed language models.
+ */
- readonly isRevoked: boolean;
+ readonly removed: readonly string[];
+ }

/**
+ * An error type for language model specific errors.
+ *
+ * Consumers of language models should check the code property to determine specific
+ * failure causes, like `if(someError.code === vscode.LanguageModelError.NotFound.name) {...}`
+ * for the case of referring to an unknown language model.
+ */
+ export class LanguageModelError extends Error {
+
+ /**
- * An event that is fired when the access the language model has has been revoked or re-granted.
+ * The language model does not exist.
*/
- // TODO@API NAME?
- readonly onDidChangeAccess: Event<void>;
+ static NotFound(message?: string): LanguageModelError;

/**
- * The name of the model.
- *
- * It is expected that the model name can be used to lookup properties like token limits or what
- * `options` are available.
+ * The requestor does not have permissions to use this
+ * language model
*/
- readonly model: string;
+ static NoPermissions(message?: string): LanguageModelError;

/**
- * Make a request to the language model.
+ * A code that identifies this error.
*
- * *Note:* This will throw an error if access has been revoked.
- *
- * @param messages
- * @param options
+ * Possible values are names of errors, like {@linkcode LanguageModelError.NotFound NotFound},
+ * or `Unknown` for unspecified errors from the language model itself. In the latter case the
+ * `cause`-property will contain the actual error.
*/
- makeChatRequest(messages: LanguageModelMessage[], options: { [name: string]: any }, token: CancellationToken): LanguageModelResponse;
+ readonly code: string;
}

- export interface LanguageModelAccessOptions {
+ /**
+ * Options for making a chat request using a language model.
+ *
+ * @see {@link lm.chatRequest}
+ */
+ export interface LanguageModelChatRequestOptions {
+
/**
* A human-readable message that explains why access to a language model is needed and what feature is enabled by it.
*/
justification?: string;
- }

/**
- * An event describing the change in the set of available language models.
- */
- export interface LanguageModelChangeEvent {
- /**
- * Added language models.
+ * Do not show the consent UI if the user has not yet granted access to the language model but fail the request instead.
*/
- readonly added: readonly string[];
+ // TODO@API Revisit this, how do you do the first request?
+ silent?: boolean;
+
/**
- * Removed language models.
+ * A set of options that control the behavior of the language model. These options are specific to the language model
+ * and need to be lookup in the respective documentation.
*/
- readonly removed: readonly string[];
+ modelOptions?: { [name: string]: any };
}

/**
@@ -158,18 +165,30 @@
export namespace lm {

/**
- * Request access to a language model.
+ * Make a chat request using a language model.
*
- * - *Note 1:* This function will throw an error when the user didn't grant access or when the
- * requested language model is not available.
+ * *Note* that language model use may be subject to access restrictions and user consent. This function will return a rejected promise
+ * if access to the language model is not possible. Reasons for this can be:
*
- * - *Note 2:* It is OK to hold on to the returned access object and use it later, but extensions
- * should check {@link LanguageModelAccess.isRevoked} before using it.
+ * - user consent not given
+ * - quote limits exceeded
+ * - model does not exist
*
- * @param id The id of the language model, see {@link languageModels} for valid values.
- * @returns A thenable that resolves to a language model access object, rejects if access wasn't granted
- */
- export function requestLanguageModelAccess(id: string, options?: LanguageModelAccessOptions): Thenable<LanguageModelAccess>;
+ * @param languageModel A language model identifier. See {@link languageModels} for available values.
+ * @param messages An array of message instances.
+ * @param options Objects that control the request.
+ * @param token A cancellation token which controls the request. See {@link CancellationTokenSource} for how to create one.
+ * @returns A thenable that resolves to a {@link LanguageModelChatResponse}. The promise will reject when the request couldn't be made.
+ */
+ // TODO@API refine doc
+ // TODO@API ✅ ExtensionContext#permission#languageModels: { languageModel: string: LanguageModelAccessInformation}
+ // TODO@API ✅ define specific error types?
+ // TODO@API ✅ NAME: sendChatRequest, fetchChatResponse, makeChatRequest, chat, chatRequest sendChatRequest
+ // TODO@API ✅ NAME: LanguageModelChatXYZMessage
+ // TODO@API ✅ errors on everything that prevents us to make the actual request
+ // TODO@API ✅ double auth
+ // TODO@API ✅ NAME: LanguageModelChatResponse, ChatResponse, ChatRequestResponse
+ export function sendChatRequest(languageModel: string, messages: LanguageModelChatMessage[], options: LanguageModelChatRequestOptions, token: CancellationToken): Thenable<LanguageModelChatResponse>;

/**
* The identifiers of all language models that are currently available.
@@ -181,4 +200,40 @@
*/
export const onDidChangeLanguageModels: Event<LanguageModelChangeEvent>;
}
- }
+
+ /**
+ * Represents extension specific information about the access to language models.
+ */
+ export interface LanguageModelAccessInformation {
+
+ /**
+ * An event that fires when access information changes.
+ */
+ onDidChange: Event<void>;
+
+ /**
+ * Checks if a request can be made to a language model.
+ *
+ * *Note* that calling this function will not trigger a consent UI but just checks.
+ *
+ * @param languageModelId A language model identifier.
+ * @return `true` if a request can be made, `false` if not, `undefined` if the language
+ * model does not exist or consent hasn't been asked for.
+ */
+ canSendRequest(languageModelId: string): boolean | undefined;
+
+ // TODO@API SYNC or ASYNC?
+ // TODO@API future
+ // retrieveQuota(languageModelId: string): { remaining: number; resets: Date };
+ }
+
+ export interface ExtensionContext {
+
+ /**
+ * An object that keeps information about how this extension can use language models.
+ *
+ * @see {@link lm.sendChatRequest}
+ */
+ readonly languageModelAccessInformation: LanguageModelAccessInformation;
+ }
+ }
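The LanguageModelChatRequestOptions and LanguageModelError declarations added above suggest how a caller can supply a justification, skip the consent UI, and distinguish failure causes. A sketch against those declarations; the model id is again a placeholder that is not part of this commit:

import * as vscode from 'vscode';

async function askQuietly(
    messages: vscode.LanguageModelChatMessage[],
    token: vscode.CancellationToken
): Promise<string | undefined> {
    const options: vscode.LanguageModelChatRequestOptions = {
        justification: 'Explains computer science concepts in the cat chat participant.',
        // Fail the request instead of showing the consent UI, per the silent option above.
        silent: true
    };
    try {
        const response = await vscode.lm.sendChatRequest('placeholder-language-model-id', messages, options, token);
        let text = '';
        for await (const fragment of response.stream) {
            text += fragment;
        }
        return text;
    } catch (err) {
        // The code property names the failure cause, for example NotFound or NoPermissions.
        if (err instanceof vscode.LanguageModelError && err.code === vscode.LanguageModelError.NotFound.name) {
            return undefined; // the requested language model does not exist
        }
        throw err;
    }
}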

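Finally, the new ExtensionContext.languageModelAccessInformation and the events on the lm namespace let an extension check, without prompting the user, whether a request would currently succeed. A sketch using the same placeholder id:

import * as vscode from 'vscode';

export function watchLanguageModelAccess(context: vscode.ExtensionContext): void {
    const modelId = 'placeholder-language-model-id'; // illustration only
    const access = context.languageModelAccessInformation;

    const report = () => {
        // true: a request can be made; false: it cannot;
        // undefined: the model is unknown or consent has not been asked for yet.
        console.log(`canSendRequest(${modelId}):`, access.canSendRequest(modelId));
    };

    report();
    // Re-check when access information or the set of available models changes.
    context.subscriptions.push(access.onDidChange(report));
    context.subscriptions.push(vscode.lm.onDidChangeLanguageModels(e => {
        console.log('Language models added:', e.added, 'removed:', e.removed);
        report();
    }));
}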