From 540ebad67cc72226e4f9ba24da7cd322e81640e2 Mon Sep 17 00:00:00 2001
From: Meor Amer <92068895+mrmer1@users.noreply.github.com>
Date: Thu, 13 Feb 2025 00:55:09 +0800
Subject: [PATCH] Refresh get started section (#329)
* add quickstarts - v1 and placeholder v2
* add quickstarts v2 API
* misc changes
* update quickstarts - pre snippets
* update quickstarts and other docs
* update quickstart descriptions
* update quickstart snippets
* update installation page
* remove unneeded pages
* update yml
* update description
* update some text
---------
Co-authored-by: billytrend-cohere <144115527+billytrend-cohere@users.noreply.github.com>
---
fern/pages/get-started/installation.mdx | 72 +++
...uage-models.mdx => introduction-start.mdx} | 0
.../pages/get-started/playground-overview.mdx | 12 +-
.../get-started/quickstart/rag-quickstart.mdx | 209 +++++++++
.../quickstart/reranking-quickstart.mdx | 171 +++++++
.../quickstart/sem-search-quickstart.mdx | 304 ++++++++++++
.../quickstart/text-gen-quickstart.mdx | 358 +++++++++++++++
.../quickstart/tool-use-quickstart.mdx | 307 +++++++++++++
.../pages/get-started/the-cohere-platform.mdx | 33 +-
.../get-started/quickstart/rag-quickstart.mdx | 234 ++++++++++
.../quickstart/reranking-quickstart.mdx | 206 +++++++++
.../quickstart/sem-search-quickstart.mdx | 304 ++++++++++++
.../quickstart/text-gen-quickstart.mdx | 434 ++++++++++++++++++
.../quickstart/tool-use-quickstart.mdx | 353 ++++++++++++++
fern/v1.yml | 36 +-
fern/v2.yml | 36 +-
16 files changed, 3033 insertions(+), 36 deletions(-)
create mode 100644 fern/pages/get-started/installation.mdx
rename fern/pages/get-started/{introduction-to-large-language-models.mdx => introduction-start.mdx} (100%)
create mode 100644 fern/pages/get-started/quickstart/rag-quickstart.mdx
create mode 100644 fern/pages/get-started/quickstart/reranking-quickstart.mdx
create mode 100644 fern/pages/get-started/quickstart/sem-search-quickstart.mdx
create mode 100644 fern/pages/get-started/quickstart/text-gen-quickstart.mdx
create mode 100644 fern/pages/get-started/quickstart/tool-use-quickstart.mdx
create mode 100644 fern/pages/v2/get-started/quickstart/rag-quickstart.mdx
create mode 100644 fern/pages/v2/get-started/quickstart/reranking-quickstart.mdx
create mode 100644 fern/pages/v2/get-started/quickstart/sem-search-quickstart.mdx
create mode 100644 fern/pages/v2/get-started/quickstart/text-gen-quickstart.mdx
create mode 100644 fern/pages/v2/get-started/quickstart/tool-use-quickstart.mdx
diff --git a/fern/pages/get-started/installation.mdx b/fern/pages/get-started/installation.mdx
new file mode 100644
index 000000000..3e9da28b6
--- /dev/null
+++ b/fern/pages/get-started/installation.mdx
@@ -0,0 +1,72 @@
+---
+title: Installation
+slug: /docs/get-started-installation
+
+description: "A guide for installing the Cohere SDK, supported in 4 different languages – Python, TypeScript, Java, and Go."
+image: "../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, Cohere SDK, API v1"
+---
+
+## Platform options
+
+To use Cohere’s models, first choose the platform from which you want to access them. Cohere's models are available on the following platforms:
+
+| Platform | Description | Setup Guide |
+|-----------------|-----------------------------------------------------------------------------|----------------------------------------------------------------------------|
+| Cohere Platform | The fastest way to start using Cohere’s models. Hosted on Cohere infrastructure and available on our public SaaS platform. | [Sign up](https://dashboard.cohere.com/welcome/register) and get an [API key](https://dashboard.cohere.com/api-keys) (trial key available) |
+| Private Deployments | For enterprises looking to deploy the Cohere stack privately on the cloud or on-prem. | [Setup guide](https://docs.cohere.com/docs/single-container-on-private-clouds) |
+| Cloud deployments | Managed services from cloud providers that enable access to Cohere's models. | • [Amazon Bedrock](https://docs.cohere.com/docs/amazon-bedrock#prerequisites) <br/> • [Amazon SageMaker](https://docs.cohere.com/docs/amazon-sagemaker-setup-guide#prerequisites) <br/> • [Azure AI Foundry](https://docs.cohere.com/docs/cohere-on-microsoft-azure#prerequisites) <br/> • [Oracle OCI](https://docs.cohere.com/docs/oracle-cloud-infrastructure-oci) |
+
+## Model usage
+
+You can then use the models via these options (a minimal Python example follows the list):
+
+- [SDK](https://docs.cohere.com/v1/reference/about#sdks). We support the following SDKs:
+ - [Python](https://github.com/cohere-ai/cohere-python)
+ - [TypeScript](https://github.com/cohere-ai/cohere-typescript)
+ - [Java](https://github.com/cohere-ai/cohere-java)
+ - [Go](https://github.com/cohere-ai/cohere-go)
+- [CLI tool](https://docs.cohere.com/v1/reference/command)
+- [Playground](https://docs.cohere.com/v1/docs/playground-overview)
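+
+For example, once the Python SDK is installed (see the next section), a minimal first request looks like the following sketch. The model name is illustrative; use any Command model available to you.
+
+```python PYTHON
+import cohere
+
+co = cohere.Client("COHERE_API_KEY")  # Get your free API key here: https://dashboard.cohere.com/api-keys
+
+response = co.chat(
+    model="command-r-plus-08-2024",  # Illustrative model name
+    message="Hello, Cohere!"
+)
+
+print(response.text)
+```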
+
+
+## Installation
+
+To install the Cohere SDK, choose one of the following four languages:
+
+
+
+
+```bash
+pip install -U cohere
+```
+[Source](https://github.com/cohere-ai/cohere-python)
+
+
+
+
+```bash
+npm i -s cohere-ai
+```
+[Source](https://github.com/cohere-ai/cohere-typescript)
+
+
+
+
+
+```gradle
+implementation 'com.cohere:cohere-java:1.x.x'
+```
+[Source](https://github.com/cohere-ai/cohere-java)
+
+
+
+
+
+```bash
+go get github.com/cohere-ai/cohere-go/v2
+```
+
+[Source](https://github.com/cohere-ai/cohere-go)
+
+
+
diff --git a/fern/pages/get-started/introduction-to-large-language-models.mdx b/fern/pages/get-started/introduction-start.mdx
similarity index 100%
rename from fern/pages/get-started/introduction-to-large-language-models.mdx
rename to fern/pages/get-started/introduction-start.mdx
diff --git a/fern/pages/get-started/playground-overview.mdx b/fern/pages/get-started/playground-overview.mdx
index 056ffb522..e8b02992a 100644
--- a/fern/pages/get-started/playground-overview.mdx
+++ b/fern/pages/get-started/playground-overview.mdx
@@ -11,26 +11,22 @@ updatedAt: 'Wed May 29 2024 18:55:51 GMT+0000 (Coordinated Universal Time)'
---
## What is the Playground?
-The [Developer Playground](https://dashboard.cohere.com/playground) is a visual interface for users to try out our APIs and iterate on prompts without writing a single line of code. Use the Playground to test your use cases and when you're ready to start building, simply click `View Code` to add Cohere's functionality to your application.
+The [Cohere Playground](https://dashboard.cohere.com/playground) is a visual interface for trying out our APIs and iterating on prompts without writing a single line of code. Use the Playground to test your use cases, and when you're ready to start building, simply click `CODE` to get code you can add to your application.
## Using the Playground
### Chat
-The Chat API provides a natural language response to a prompt. You can use the Chat Playground to generate text, answer a question or create content. There is a default preamble, which you can change in the playground. The preamble and the messages are sent to the model to generate a response.
+The [Chat endpoint](/reference/chat) provides a natural language response to a prompt. You can use the Chat Playground to generate text, answer a question, or create content. There is a default preamble, which you can change in the Playground. The preamble and the messages are sent to the model to generate a response.
- To write inputs that produce the best results for your use case, read our [Prompt Engineering](/docs/prompt-engineering) guide.
- Try tinkering with different [temperature](/docs/temperature) values to get different outputs.
-- You can also toggle raw prompting on the playground. If raw prompting is turned off, the user's input will be sent to the playground without any preprocessing.
-
-
-
### Embed
-Using [Embed](/reference/embed) in the Playground enables users to assign numerical representations to strings and visualize comparative meaning on a 2-dimensional plane. Phrases similar in meaning should ideally be closer together on this visualization. Add a couple of your own phrases and see if the Playground visualization feels accurate to you.
+The [Embed endpoint](/reference/embed) enables users to assign numerical representations to strings and visualize comparative meaning on a 2-dimensional plane. Phrases similar in meaning should ideally be closer together on this visualization. Add a couple of your own phrases and see if the Playground visualization feels accurate to you.
@@ -39,7 +35,7 @@ Cohere [embeddings](/docs/embeddings) can be used to train a semantic classifier
### Classify
-The Cohere [Classify](/reference/classify) endpoint enables users to create a classifier from a few labeled examples.
+The [Classify endpoint](/reference/classify) enables users to create a classifier from a few labeled examples.
diff --git a/fern/pages/get-started/quickstart/rag-quickstart.mdx b/fern/pages/get-started/quickstart/rag-quickstart.mdx
new file mode 100644
index 000000000..237e95dea
--- /dev/null
+++ b/fern/pages/get-started/quickstart/rag-quickstart.mdx
@@ -0,0 +1,209 @@
+---
+title: Retrieval Augmented Generation (RAG)
+slug: /docs/rag-quickstart
+
+description: "A quickstart guide for performing retrieval augmented generation (RAG) with Cohere's Command models (v1 API)."
+image: "../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, RAG, retrieval augmented generation, chatbot, command models"
+---
+
+Retrieval Augmented Generation (RAG) enables an LLM to ground its responses on external documents, thus improving the accuracy of its responses and minimizing hallucinations.
+
+The Chat endpoint comes with built-in RAG capabilities such as document grounding and citation generation.
+
+This quickstart guide shows you how to perform RAG with the Chat endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+### Documents
+
+First, define the documents that will be passed as the context for RAG. These documents are typically retrieved from sources such as vector databases via semantic search, or any system that can retrieve unstructured data given a user query.
+
+Each document can take any number of fields, e.g. `title`, `url`, `text`, etc.
+
+```python PYTHON
+documents = [
+ {
+ "text": "Reimbursing Travel Expenses: Easily manage your travel expenses by submitting them through our finance tool. Approvals are prompt and straightforward."
+ },
+ {
+ "text": "Health and Wellness Benefits: We care about your well-being and offer gym memberships, on-site yoga classes, and comprehensive health insurance."
+ },
+]
+```
+### Response Generation
+
+Next, call the Chat API by passing the documents in the `documents` parameter. This tells the model to run in RAG mode and use these documents as the context in its response.
+
+
+
+```python PYTHON
+message = "Are there fitness-related benefits?"
+
+response = co.chat(
+ model="command-r-plus-08-2024",
+ message=message,
+ documents=documents
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+message = "Are there fitness-related benefits?"
+
+response = co.chat(
+ model="command-r-plus-08-2024",
+ message=message,
+ documents=documents
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+message = "Are there fitness-related benefits?"
+
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ message=message,
+ documents=documents
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+message = "Are there fitness-related benefits?"
+
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ message=message,
+ documents=documents
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+message = "Are there fitness-related benefits?"
+
+response = co.chat(
+ message=message,
+ documents=documents
+)
+
+print(response.text)
+```
+
+
+
+```mdx wordWrap
+Yes, we offer gym memberships, on-site yoga classes, and comprehensive health insurance.
+
+```
+
+### Citation Generation
+
+The response object contains a `citations` field, which contains specific text spans from the documents on which the response is grounded.
+
+```python PYTHON
+if response.citations:
+ for citation in response.citations:
+ print(citation, "\n")
+```
+
+```mdx wordWrap
+start=14 end=88 text='gym memberships, on-site yoga classes, and comprehensive health insurance.' document_ids=['doc_1']
+
+```
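+
+As an illustration, you can map each cited span back to its source document. The sketch below assumes the default `doc_{i}` IDs, which the endpoint assigns in the order the documents are passed:
+
+```python PYTHON
+if response.citations:
+    for citation in response.citations:
+        for doc_id in citation.document_ids:
+            # Default IDs are "doc_0", "doc_1", ... in list order
+            doc = documents[int(doc_id.split("_")[1])]
+            print(f"'{citation.text}' is grounded in: {doc['text']}")
+```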
+
+
+
+## Further Resources
+- [Chat endpoint API reference](https://docs.cohere.com/v1/reference/chat)
+- [Documentation on RAG](https://docs.cohere.com/v1/docs/retrieval-augmented-generation-rag)
+- [LLM University module on RAG](https://cohere.com/llmu#rag)
\ No newline at end of file
diff --git a/fern/pages/get-started/quickstart/reranking-quickstart.mdx b/fern/pages/get-started/quickstart/reranking-quickstart.mdx
new file mode 100644
index 000000000..bf0882d23
--- /dev/null
+++ b/fern/pages/get-started/quickstart/reranking-quickstart.mdx
@@ -0,0 +1,171 @@
+---
+title: Reranking
+slug: /docs/reranking-quickstart
+
+description: "A quickstart guide for performing reranking with Cohere's Reranking models (v1 API)."
+image: "../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, reranking, semantic search, rerank models"
+---
+Cohere's reranking models are available via the Rerank endpoint. This endpoint provides a powerful semantic boost to the search quality of any keyword or vector search system.
+
+This quickstart guide shows you how to perform reranking with the Rerank endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT"
+)
+```
+
+
+
+
+### Retrieved Documents
+
+First, define the list of documents to be reranked.
+
+```python PYTHON
+documents = [
+ "Reimbursing Travel Expenses: Easily manage your travel expenses by submitting them through our finance tool. Approvals are prompt and straightforward.",
+ "Working from Abroad: Working remotely from another country is possible. Simply coordinate with your manager and ensure your availability during core hours.",
+ "Health and Wellness Benefits: We care about your well-being and offer gym memberships, on-site yoga classes, and comprehensive health insurance.",
+ "Performance Reviews Frequency: We conduct informal check-ins every quarter and formal performance reviews twice a year.",
+]
+```
+
+### Reranking
+
+Then, perform reranking by passing the documents and the user query to the Rerank endpoint.
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="rerank-v3.5",
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="rerank-v3.5",
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="YOUR_ENDPOINT_NAME",
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+
+
+
+
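+Each result carries an `index` that points back into the original `documents` list, along with a `relevance_score`. As a quick illustration, you can print the matched documents alongside their scores:
+
+```python PYTHON
+for result in results.results:
+    print(f"Score: {result.relevance_score:.4f}")
+    print(f"Document: {documents[result.index]}\n")
+```
+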
+## Further Resources
+- [Rerank endpoint API reference](https://docs.cohere.com/v1/reference/rerank)
+- [Documentation on reranking](https://docs.cohere.com/v1/docs/rerank-overview)
+- [LLM University chapter on reranking](https://cohere.com/llmu/reranking)
\ No newline at end of file
diff --git a/fern/pages/get-started/quickstart/sem-search-quickstart.mdx b/fern/pages/get-started/quickstart/sem-search-quickstart.mdx
new file mode 100644
index 000000000..15136e6ed
--- /dev/null
+++ b/fern/pages/get-started/quickstart/sem-search-quickstart.mdx
@@ -0,0 +1,304 @@
+---
+title: Semantic Search
+slug: /docs/sem-search-quickstart
+
+description: "A quickstart guide for performing text semantic search with Cohere's Embed models (v1 API)."
+image: "../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, semantic search, text embeddings, embed models"
+---
+Cohere's embedding models are available via the Embed endpoint. This endpoint enables you to embed text documents (multilingual) and images into the vector space.
+
+Semantic search, powered by embeddings, enables applications to perform information retrieval based on the context or meaning of a document.
+
+This quickstart guide shows you how to perform semantic search with the Embed endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="AZURE_API_KEY_COMMAND",
+ base_url="AZURE_ENDPOINT_COMMAND"
+)
+```
+
+
+
+
+### Document Embeddings
+First, embed the list of available documents using the Embed endpoint by specifying the `input_type` as `search_document`.
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="YOUR_MODEL_NAME",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="YOUR_ENDPOINT_NAME",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+### Query Embedding
+Next, embed the user query using the Embed endpoint by specifying the `input_type` as `search_query`.
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="YOUR_MODEL_NAME",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="YOUR_ENDPOINT_NAME",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+### Semantic Search
+
+Then, perform semantic search by computing the similarity between the query embedding and the document embeddings, and then returning the most similar documents.
+
+```python PYTHON
+import numpy as np
+
+# Compute dot product similarity and display results
+def return_results(query_emb, doc_emb, documents):
+ n = 2 # customize your top N results
+ scores = np.dot(query_emb, np.transpose(doc_emb))[0]
+ max_idx = np.argsort(-scores)[:n]
+
+ for rank, idx in enumerate(max_idx):
+ print(f"Rank: {rank+1}")
+ print(f"Score: {scores[idx]}")
+ print(f"Document: {documents[idx]}\n")
+
+return_results(query_emb, doc_emb, documents)
+```
+```mdx wordWrap
+Rank: 1
+Score: 0.23645164410153752
+Document: Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.
+
+Rank: 2
+Score: 0.1505097876657395
+Document: Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.
+```
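+
+The example above ranks documents by raw dot product. If your embeddings are not unit-normalized and you prefer scores on a fixed [-1, 1] scale, you can compute cosine similarity instead. A minimal sketch:
+
+```python PYTHON
+import numpy as np
+
+def cosine_scores(query_emb, doc_emb):
+    q = np.array(query_emb)
+    d = np.array(doc_emb)
+    # Normalize each embedding to unit length, then take dot products
+    q = q / np.linalg.norm(q, axis=1, keepdims=True)
+    d = d / np.linalg.norm(d, axis=1, keepdims=True)
+    return (q @ d.T)[0]
+
+print(cosine_scores(query_emb, doc_emb))
+```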
+
+
+## Further Resources
+- [Embed endpoint API reference](https://docs.cohere.com/v1/reference/embed)
+- [Documentation on embeddings](https://docs.cohere.com/v1/docs/embeddings)
+- [LLM University module on semantic search](https://cohere.com/llmu#semantic-search)
\ No newline at end of file
diff --git a/fern/pages/get-started/quickstart/text-gen-quickstart.mdx b/fern/pages/get-started/quickstart/text-gen-quickstart.mdx
new file mode 100644
index 000000000..5645dfeb8
--- /dev/null
+++ b/fern/pages/get-started/quickstart/text-gen-quickstart.mdx
@@ -0,0 +1,358 @@
+---
+title: Text Generation
+slug: /docs/text-gen-quickstart
+
+description: "A quickstart guide for performing text generation with Cohere's Command models (v1 API)."
+image: "../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, text generation, chatbot, command models"
+---
+
+Cohere's Command family of LLMs is available via the Chat endpoint. This endpoint enables you to build generative AI applications and facilitates a conversational interface for building chatbots.
+
+This quickstart guide shows you how to perform text generation with the Chat endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+
+### Basic Text Generation
+To perform basic text generation, call the Chat endpoint with the `message` parameter containing the user message.
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+)
+
+print(response.text)
+```
+
+
+
+```mdx wordWrap
+"Excited to be part of the Co1t team, I'm [Your Name], a [Your Role], passionate about [Your Area of Expertise] and looking forward to contributing to the company's success."
+```
+
+### State Management
+To maintain the state of a conversation, such as for building chatbots, append a sequence of `user` and `chatbot` messages to the `chat_history` list. You can also include a `preamble` parameter, which will act as a system message to set the context of the conversation.
+
+
+
+ ```python PYTHON
+ response = co.chat(
+ model="command-r-plus-08-2024",
+ preamble="You respond in concise sentences.",
+ chat_history=[
+ {
+ "role": "user",
+ "message": "Hello"
+ },
+ {
+ "role": "chatbot",
+ "message": "Hi, how can I help you today?"
+ }
+ ],
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+ )
+
+ print(response.text)
+ ```
+
+
+
+ ```python PYTHON
+ response = co.chat(
+ preamble="You respond in concise sentences.",
+ chat_history=[
+ {
+ "role": "user",
+ "message": "Hello"
+ },
+ {
+ "role": "chatbot",
+ "message": "Hi, how can I help you today?"
+ }
+ ],
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+ )
+
+ print(response.text)
+ ```
+
+
+
+ ```python PYTHON
+ response = co.chat(
+ model="YOUR_MODEL_NAME",
+ preamble="You respond in concise sentences.",
+ chat_history=[
+ {
+ "role": "user",
+ "message": "Hello"
+ },
+ {
+ "role": "chatbot",
+ "message": "Hi, how can I help you today?"
+ }
+ ],
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+ )
+
+ print(response.text)
+ ```
+
+
+
+ ```python PYTHON
+ response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ preamble="You respond in concise sentences.",
+ chat_history=[
+ {
+ "role": "user",
+ "message": "Hello"
+ },
+ {
+ "role": "chatbot",
+ "message": "Hi, how can I help you today?"
+ }
+ ],
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+ )
+
+ print(response.text)
+ ```
+
+
+
+ ```python PYTHON
+ response = co.chat(
+ preamble="You respond in concise sentences.",
+ chat_history=[
+ {
+ "role": "user",
+ "message": "Hello"
+ },
+ {
+ "role": "chatbot",
+ "message": "Hi, how can I help you today?"
+ }
+ ],
+ message="I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates?"
+ )
+
+ print(response.text)
+ ```
+
+
+```mdx wordWrap
+"Excited to join the team at Co1t, looking forward to contributing my skills and collaborating with everyone!"
+```
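+
+The response object also carries the updated conversation history. To continue the conversation, pass `response.chat_history` to the next call. A short sketch, shown with the Cohere Platform client and an illustrative follow-up message:
+
+```python PYTHON
+response = co.chat(
+    model="command-r-plus-08-2024",
+    chat_history=response.chat_history,  # Reuse the updated history
+    message="Make it more enthusiastic."
+)
+
+print(response.text)
+```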
+
+### Streaming
+
+To stream the generated text, call the Chat endpoint using `chat_stream` instead of `chat`. This returns a generator that yields `chunk` objects, from which you can access the generated text.
+
+
+
+```python PYTHON
+message = "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+
+response = co.chat_stream(
+ model="command-r-plus-08-2024",
+ message=message
+)
+
+for chunk in response:
+ if chunk.event_type == "text-generation":
+ print(chunk.text, end="")
+```
+
+
+
+```python PYTHON
+message = "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+
+response = co.chat_stream(
+ message=message
+)
+
+for chunk in response:
+ if chunk.event_type == "text-generation":
+ print(chunk.text, end="")
+```
+
+
+
+```python PYTHON
+message = "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+
+response = co.chat_stream(
+ model="YOUR_MODEL_NAME",
+ message=message
+)
+
+for chunk in response:
+ if chunk.event_type == "text-generation":
+ print(chunk.text, end="")
+```
+
+
+
+```python PYTHON
+message = "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+
+response = co.chat_stream(
+ model="YOUR_ENDPOINT_NAME",
+ message=message
+)
+
+for chunk in response:
+ if chunk.event_type == "text-generation":
+ print(chunk.text, end="")
+```
+
+
+
+```python PYTHON
+message = "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+
+response = co.chat_stream(
+ message=message
+)
+
+for chunk in response:
+ if chunk.event_type == "text-generation":
+ print(chunk.text, end="")
+```
+
+
+```mdx wordWrap
+"Excited to be part of the Co1t team, I'm [Your Name], a [Your Role/Position], looking forward to contributing my skills and collaborating with this talented group to drive innovation and success."
+```
+
+
+
+
+## Further Resources
+- [Chat endpoint API reference](https://docs.cohere.com/v1/reference/chat)
+- [Documentation on text generation](https://docs.cohere.com/v1/docs/introduction-to-text-generation-at-cohere)
+- [LLM University module on text generation](https://cohere.com/llmu#text-generation)
\ No newline at end of file
diff --git a/fern/pages/get-started/quickstart/tool-use-quickstart.mdx b/fern/pages/get-started/quickstart/tool-use-quickstart.mdx
new file mode 100644
index 000000000..e875c252b
--- /dev/null
+++ b/fern/pages/get-started/quickstart/tool-use-quickstart.mdx
@@ -0,0 +1,307 @@
+---
+title: Tool Use & Agents
+slug: /docs/tool-use-quickstart
+
+description: "A quickstart guide for using tool use and building agents with Cohere's Command models (v1 API)."
+image: "../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, tool use, agents, chatbot, command models"
+---
+
+Tool use enables developers to build agentic applications that connect to external tools, perform reasoning, and take actions.
+
+The Chat endpoint comes with built-in tool use capabilities such as function calling, multi-step reasoning, and citation generation.
+
+This quickstart guide shows you how to perform tool use with the Chat endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClient(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.Client(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+### Tool Definition
+First, we need to set up the tools. A tool can be any function or service that can receive and send objects.
+
+We also need to define the tool schemas in a format that can be passed to the Chat endpoint. The schema must contain the following fields: `name`, `description`, and `parameter_definitions`.
+
+```python PYTHON
+def get_weather(location):
+ # Implement your tool calling logic here
+ return {"temperature": "20C"}
+
+functions_map = {"get_weather": get_weather}
+
+tools = [
+ {
+ "name": "get_weather",
+ "description": "Gets the weather of a given location",
+ "parameter_definitions": {
+ "location": {
+ "description": "The location to get weather, example: San Francisco, CA",
+ "type": "str",
+ "required": True
+ }
+ }
+ },
+]
+```
+
+### Tool Calling
+Next, pass the tool schema to the Chat endpoint together with the user message.
+
+The LLM will then generate the tool calls (if any) and return the `tool_calls` object.
+
+
+
+```python PYTHON
+message = "What's the weather in Toronto?"
+
+response = co.chat(
+ model="command-r-plus-08-2024",
+ message=message,
+ tools=tools
+)
+
+print(response.tool_calls)
+```
+
+
+
+```python PYTHON
+message = "What's the weather in Toronto?"
+
+response = co.chat(
+ model="command-r-plus-08-2024",
+ message=message,
+ tools=tools
+)
+
+print(response.tool_calls)
+```
+
+
+
+```python PYTHON
+message = "What's the weather in Toronto?"
+
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ message=message,
+ tools=tools
+)
+
+print(response.tool_calls)
+```
+
+
+
+```python PYTHON
+message = "What's the weather in Toronto?"
+
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ message=message,
+ tools=tools
+)
+
+print(response.tool_calls)
+```
+
+
+
+```python PYTHON
+message = "What's the weather in Toronto?"
+
+response = co.chat(
+ message=message,
+ tools=tools
+)
+
+print(response.tool_calls)
+```
+
+
+
+```mdx wordWrap
+[ToolCall(name='get_weather', parameters={'location': 'Toronto'})]
+```
+
+### Tool Execution
+Next, execute the tool calls using the parameters the model generated in the previous step.
+
+```python PYTHON
+tool_content = []
+if response.tool_calls:
+ for tc in response.tool_calls:
+ tool_call = {"name": tc.name, "parameters": tc.parameters}
+ tool_result = functions_map[tc.name](**tc.parameters)
+ tool_content.append({"call": tool_call, "outputs": [tool_result]})
+```
+
+### Response Generation
+The results are passed back to the LLM, which generates the final response.
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ message="",
+ tools=tools,
+ tool_results=tool_content,
+ chat_history=response.chat_history
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ message="",
+ tools=tools,
+ tool_results=tool_content,
+ chat_history=response.chat_history
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ message="",
+ tools=tools,
+ tool_results=tool_content,
+ chat_history=response.chat_history
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ message="",
+ tools=tools,
+ tool_results=tool_content,
+ chat_history=response.chat_history
+)
+
+print(response.text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ message="",
+ tools=tools,
+ tool_results=tool_content,
+ chat_history=response.chat_history
+)
+
+print(response.text)
+```
+
+
+
+```mdx wordWrap
+It is 20C in Toronto.
+```
+
+### Citation Generation
+The response object contains a `citations` field, which contains specific text spans from the documents on which the response is grounded.
+
+```python PYTHON
+if response.citations:
+ for citation in response.citations:
+ print(citation, "\n")
+```
+
+```mdx wordWrap
+start=6 end=9 text='20C' document_ids=['get_weather:0:2:0']
+```
+
+
+## Further Resources
+- [Chat endpoint API reference](https://docs.cohere.com/v1/reference/chat)
+- [Documentation on tool use](https://docs.cohere.com/v1/docs/tools)
+- [LLM University module on tool use](https://cohere.com/llmu#tool-use)
diff --git a/fern/pages/get-started/the-cohere-platform.mdx b/fern/pages/get-started/the-cohere-platform.mdx
index e6a828cc4..1f3bd31d9 100644
--- a/fern/pages/get-started/the-cohere-platform.mdx
+++ b/fern/pages/get-started/the-cohere-platform.mdx
@@ -10,9 +10,30 @@ keywords: 'natural language processing, generative AI, fine-tuning models'
createdAt: 'Thu Oct 13 2022 21:30:34 GMT+0000 (Coordinated Universal Time)'
updatedAt: 'Mon Jun 24 2024 09:16:55 GMT+0000 (Coordinated Universal Time)'
---
+
+
+## Large Language Models (LLMs)
+
+Language is important. It’s how we learn about the world (e.g. news, searching the web or Wikipedia), and also how we shape it (e.g. agreements, laws, or messages). Language is also how we connect and communicate — as people, and as groups and companies.
+
+Despite the rapid evolution of software, computers remain limited in their ability to deal with language. Software is great at searching for exact matches in text, but often fails at more advanced uses of language — ones that humans employ on a daily basis.
+
+There’s a clear need for more intelligent tools that better understand language.
+
+
+A recent breakthrough in artificial intelligence (AI) is the introduction of language processing technologies that enable us to build more intelligent systems with a richer understanding of language than ever before. Large pre-trained Transformer language models, or simply large language models, vastly extend the capabilities of what systems are able to do with text.
+
+
+
+
+Consider this: adding language models to empower Google Search was noted as “representing the biggest leap forward in the past five years, and one of the biggest leaps forward in the history of Search.” Microsoft also uses such models for every query in the Bing search engine.
+
+Despite the utility of these models, training and deploying them effectively is resource intensive, requiring a large investment of data, compute, and engineering resources.
+
+## Cohere's LLMs
+
Cohere allows developers and enterprises to build LLM-powered applications. We do that by creating world-class models, along with the supporting platform required to deploy them securely and privately.
-## Cohere's Large Language Models (LLMs)
The Command family of models includes [Command](https://cohere.com/models/command), [Command R](/docs/command-r), and [Command R+](/docs/command-r-plus). Together, they are the text-generation LLMs powering conversational agents, summarization, copywriting, and similar use cases. They work through the [Chat](/reference/chat) endpoint, which can be used with or without [retrieval augmented generation](/docs/retrieval-augmented-generation-rag) RAG.
@@ -25,7 +46,7 @@ The Command family of models includes [Command](https://cohere.com/models/comman
[Click here](/docs/foundation-models) to learn more about Cohere foundation models.
-## These LLMs Make it Easy to Build Conversational Agents (and Other LLM-powered Apps)
+## Example Applications
Try [the Chat UI](https://coral.cohere.com) to see what an LLM-powered conversational agent can look like. It is able to converse, summarize text, and write emails and articles.
@@ -50,7 +71,7 @@ What’s more, advanced RAG capabilities allow you to see what underlying query
[Click here](/docs/serving-platform) to learn more about the Cohere serving platform.
-### Use Language Models to Build Better Search and RAG Systems
+### Advanced Search & Retrieval
Embeddings enable you to search based on what a phrase _means_ rather than simply what keywords it _contains_, leading to search systems that incorporate context and user intent better than anything that has come before.
@@ -58,13 +79,13 @@ Embeddings enable you to search based on what a phrase _means_ rather than simpl
Learn more about semantic search [here](https://cohere.com/llmu/what-is-semantic-search).
-## Create Fine-Tuned Models with Ease
+## Fine-Tuning
To [create a fine-tuned model](/docs/fine-tuning), simply upload a dataset and hold on while we train a custom model and then deploy it for you. Fine-tuning can be done with [generative models](/docs/generate-fine-tuning), [multi-label classification models](/docs/classify-fine-tuning), [rerank models](/docs/rerank-fine-tuning), and [chat models](/docs/chat-fine-tuning).
-## Where you can access Cohere Models
+## Accessing Cohere Models
Depending on your privacy/security requirements there are a number of ways to access Cohere:
@@ -79,6 +100,6 @@ Depending on your privacy/security requirements there are a number of ways to ac
- On-premise: if your organization deals with sensitive data that cannot live on a cloud, we also offer the option for fully-private deployment on your own infrastructure. Please [contact sales](mailto:team@cohere.com) for information.
-### Let us Know What You’re Making
+## Let us Know What You’re Making
We hope this overview has whetted your appetite for building with our generative AI models. Reach out to us on [Discord](https://discord.com/invite/co-mmunity) with any questions or to showcase your projects – we love hearing from the Cohere community!
diff --git a/fern/pages/v2/get-started/quickstart/rag-quickstart.mdx b/fern/pages/v2/get-started/quickstart/rag-quickstart.mdx
new file mode 100644
index 000000000..3e5e9d5f3
--- /dev/null
+++ b/fern/pages/v2/get-started/quickstart/rag-quickstart.mdx
@@ -0,0 +1,234 @@
+---
+title: Retrieval Augmented Generation (RAG)
+slug: /docs/v2/rag-quickstart
+
+description: "A quickstart guide for performing retrieval augmented generation (RAG) with Cohere's Command models (v2 API)."
+image: "../../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, RAG, retrieval augmented generation, chatbot, command models"
+---
+Retrieval Augmented Generation (RAG) enables an LLM to ground its responses on external documents, thus improving the accuracy of its responses and minimizing hallucinations.
+
+The Chat endpoint comes with built-in RAG capabilities such as document grounding and citation generation.
+
+This quickstart guide shows you how to perform RAG with the Chat endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+### Documents
+
+First, define the documents that will be passed as the context for RAG. These documents are typically retrieved from sources such as vector databases via semantic search, or any system that can retrieve unstructured data given a user query.
+
+Each document is a `data` object that can take any number of fields, e.g. `title`, `url`, `text`, etc.
+
+```python PYTHON
+documents = [
+ {
+ "data": {
+ "text": "Reimbursing Travel Expenses: Easily manage your travel expenses by submitting them through our finance tool. Approvals are prompt and straightforward."
+ }
+ },
+ {
+ "data": {
+ "text": "Working from Abroad: Working remotely from another country is possible. Simply coordinate with your manager and ensure your availability during core hours."
+ }
+ },
+ {
+ "data": {
+ "text": "Health and Wellness Benefits: We care about your well-being and offer gym memberships, on-site yoga classes, and comprehensive health insurance."
+ }
+ },
+]
+
+```
+### Response Generation
+
+Next, call the Chat API by passing the documents in the `documents` parameter. This tells the model to run in RAG mode and use these documents as the context in its response.
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there health benefits?"
+
+# Generate the response
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=[{"role": "user", "content": query}],
+ documents=documents
+)
+
+# Display the response
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there health benefits?"
+
+# Generate the response
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=[{"role": "user", "content": query}],
+ documents=documents
+)
+
+# Display the response
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there health benefits?"
+
+# Generate the response
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ messages=[{"role": "user", "content": query}],
+ documents=documents
+)
+
+# Display the response
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there health benefits?"
+
+# Generate the response
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ messages=[{"role": "user", "content": query}],
+ documents=documents
+)
+
+# Display the response
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there health benefits?"
+
+# Generate the response
+response = co.chat(
+ model="model", # Pass a dummy string
+ messages=[{"role": "user", "content": query}],
+ documents=documents
+)
+
+# Display the response
+print(response.message.content[0].text)
+```
+
+
+
+
+```mdx wordWrap
+Yes, there are health benefits. We offer gym memberships, on-site yoga classes, and comprehensive health insurance.
+```
+
+### Citation Generation
+
+The response object contains a `citations` field, which contains specific text spans from the documents on which the response is grounded.
+
+```python PYTHON
+if response.message.citations:
+ for citation in response.message.citations:
+ print(citation, "\n")
+```
+
+```mdx wordWrap
+start=14 end=88 text='gym memberships, on-site yoga classes, and comprehensive health insurance.' document_ids=['doc_1']
+
+```
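+
+Because each citation carries `start` and `end` offsets into the generated text, you can, for example, mark the grounded spans inline. A minimal sketch:
+
+```python PYTHON
+text = response.message.content[0].text
+
+if response.message.citations:
+    # Insert markers from the end so earlier offsets stay valid
+    for citation in sorted(
+        response.message.citations, key=lambda c: c.start, reverse=True
+    ):
+        text = (
+            text[: citation.start]
+            + "["
+            + text[citation.start : citation.end]
+            + "]"
+            + text[citation.end :]
+        )
+
+print(text)
+```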
+
+
+
+## Further Resources
+- [Chat endpoint API reference](https://docs.cohere.com/reference/chat)
+- [Documentation on RAG](https://docs.cohere.com/docs/retrieval-augmented-generation-rag)
+- [LLM University module on RAG](https://cohere.com/llmu#rag)
diff --git a/fern/pages/v2/get-started/quickstart/reranking-quickstart.mdx b/fern/pages/v2/get-started/quickstart/reranking-quickstart.mdx
new file mode 100644
index 000000000..a6af14c89
--- /dev/null
+++ b/fern/pages/v2/get-started/quickstart/reranking-quickstart.mdx
@@ -0,0 +1,206 @@
+---
+title: Reranking
+slug: /docs/v2/reranking-quickstart
+
+description: "A quickstart guide for performing reranking with Cohere's Reranking models (v2 API)."
+image: "../../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, reranking, semantic search, rerank models"
+---
+Cohere's reranking models are available via the Rerank endpoint. This endpoint provides a powerful semantic boost to the search quality of any keyword or vector search system.
+
+This quickstart guide shows you how to perform reranking with the Rerank endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+### Retrieved Documents
+
+First, define the list of documents to be reranked.
+
+```python PYTHON
+documents = [
+ "Reimbursing Travel Expenses: Easily manage your travel expenses by submitting them through our finance tool. Approvals are prompt and straightforward.",
+ "Working from Abroad: Working remotely from another country is possible. Simply coordinate with your manager and ensure your availability during core hours.",
+ "Health and Wellness Benefits: We care about your well-being and offer gym memberships, on-site yoga classes, and comprehensive health insurance.",
+ "Performance Reviews Frequency: We conduct informal check-ins every quarter and formal performance reviews twice a year.",
+]
+```
+
+### Reranking
+
+Then, perform reranking by passing the documents and the user query to the Rerank endpoint.
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="rerank-v3.5",
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="rerank-v3.5",
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="YOUR_MODEL_NAME",
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="YOUR_ENDPOINT_NAME",
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Are there fitness-related perks?"
+
+# Rerank the documents
+results = co.rerank(
+ model="model", # Pass a dummy string
+ query=query,
+ documents=documents,
+ top_n=2
+)
+
+for result in results.results:
+ print(result)
+```
+
+
+
+
+
+
+
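+Each result carries an `index` that points back into the original `documents` list, along with a `relevance_score`. As a quick illustration, you can print the matched documents alongside their scores:
+
+```python PYTHON
+for result in results.results:
+    print(f"Score: {result.relevance_score:.4f}")
+    print(f"Document: {documents[result.index]}\n")
+```
+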
+## Further Resources
+- [Rerank endpoint API reference](https://docs.cohere.com/reference/rerank)
+- [Documentation on reranking](https://docs.cohere.com/docs/rerank-overview)
+- [LLM University chapter on reranking](https://cohere.com/llmu/reranking)
diff --git a/fern/pages/v2/get-started/quickstart/sem-search-quickstart.mdx b/fern/pages/v2/get-started/quickstart/sem-search-quickstart.mdx
new file mode 100644
index 000000000..aaf4dd9a7
--- /dev/null
+++ b/fern/pages/v2/get-started/quickstart/sem-search-quickstart.mdx
@@ -0,0 +1,304 @@
+---
+title: Semantic Search
+slug: /docs/v2/sem-search-quickstart
+
+description: "A quickstart guide for performing text semantic search with Cohere's Embed models (v2 API)."
+image: "../../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, semantic search, text embeddings, embed models"
+---
+Cohere's embedding models are available via the Embed endpoint. This endpoint enables you to embed text documents (multilingual) and images into the vector space.
+
+Semantic search, powered by embeddings, enables applications to perform information retrieval based on the context or meaning of a document.
+
+This quickstart guide shows you how to perform semantic search with the Embed endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+### Document Embeddings
+First, embed the list of available documents using the Embed endpoint, setting the `input_type` parameter to `search_document`.
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="YOUR_MODEL_NAME",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ model="YOUR_ENDPOINT_NAME",
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Define the documents
+documents = [
+ "Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.",
+ "Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.",
+ "Working Hours Flexibility: While our core hours are 9 AM to 5 PM, we offer flexibility to adjust as needed.",
+]
+
+# Embed the documents
+doc_emb = co.embed(
+ input_type="search_document",
+ texts=documents,
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+### Query Embedding
+Next, embed the user query using the Embed endpoint, setting the `input_type` parameter to `search_query`.
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="embed-english-v3.0",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="YOUR_MODEL_NAME",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ model="YOUR_ENDPOINT_NAME",
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+```python PYTHON
+# Add the user query
+query = "Ways to connect with my teammates"
+
+# Embed the query
+query_emb = co.embed(
+ input_type="search_query",
+ texts=[query],
+ embedding_types=["float"]
+).embeddings.float
+```
+
+
+
+### Semantic Search
+
+Then, perform semantic search: compute the similarity between the query embedding and each document embedding, and return the most similar documents.
+
+```python PYTHON
+import numpy as np
+
+# Compute dot product similarity and display results
+def return_results(query_emb, doc_emb, documents):
+ n = 2 # customize your top N results
+ scores = np.dot(query_emb, np.transpose(doc_emb))[0]
+ max_idx = np.argsort(-scores)[:n]
+
+ for rank, idx in enumerate(max_idx):
+ print(f"Rank: {rank+1}")
+ print(f"Score: {scores[idx]}")
+ print(f"Document: {documents[idx]}\n")
+
+return_results(query_emb, doc_emb, documents)
+```
+```mdx wordWrap
+Rank: 1
+Score: 0.23645164410153752
+Document: Joining Slack Channels: Be sure to join relevant channels to stay informed and engaged.
+
+Rank: 2
+Score: 0.1505097876657395
+Document: Finding Coffee Spots: For your caffeine fix, cross the street to the café for artisan coffee.
+```
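+
+The example above scores documents with a raw dot product. If your embeddings are not unit-normalized, cosine similarity may be preferable because it is insensitive to vector magnitude. A minimal sketch:
+
+```python PYTHON
+import numpy as np
+
+# Cosine similarity: dot product of L2-normalized vectors
+def cosine_similarity(query_emb, doc_emb):
+    q = np.asarray(query_emb)[0]
+    d = np.asarray(doc_emb)
+    return d @ q / (np.linalg.norm(d, axis=1) * np.linalg.norm(q))
+
+print(cosine_similarity(query_emb, doc_emb))
+```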
+
+
+## Further Resources
+- [Embed endpoint API reference](https://docs.cohere.com/reference/embed)
+- [Documentation on embeddings](https://docs.cohere.com/docs/embeddings)
+- [LLM University module on semantic search](https://cohere.com/llmu#semantic-search)
\ No newline at end of file
diff --git a/fern/pages/v2/get-started/quickstart/text-gen-quickstart.mdx b/fern/pages/v2/get-started/quickstart/text-gen-quickstart.mdx
new file mode 100644
index 000000000..04202263f
--- /dev/null
+++ b/fern/pages/v2/get-started/quickstart/text-gen-quickstart.mdx
@@ -0,0 +1,434 @@
+---
+title: Text Generation
+slug: /docs/v2/text-gen-quickstart
+
+description: "A quickstart guide for performing text generation with Cohere's Command models (v2 API)."
+image: "../../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, text generation, chatbot, command models"
+---
+Cohere's Command family of LLMs is available via the Chat endpoint. This endpoint enables you to build generative AI applications and provides a conversational interface for building chatbots.
+
+This quickstart guide shows you how to perform text generation with the Chat endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client. The code blocks below show the client setup for each platform: the Cohere Platform, private deployments, Bedrock, SageMaker, and Azure AI, in that order.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+
+
+### Basic Text Generation
+To perform basic text generation, call the Chat endpoint, passing a `messages` parameter that contains the `user` message.
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ],
+)
+
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ],
+)
+
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ],
+)
+
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ],
+)
+
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="model", # Pass a dummy string
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ],
+)
+```
+
+
+
+
+```mdx wordWrap
+"Excited to be part of the Co1t team, I'm [Your Name], a [Your Role], passionate about [Your Area of Expertise] and looking forward to contributing to the company's success."
+```
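+
+The Chat endpoint also accepts optional sampling parameters. For example, `temperature` controls randomness, with lower values giving more deterministic output. A minimal sketch, reusing the client from the Setup step (the prompt here is just an illustration):
+
+```python PYTHON
+response = co.chat(
+    model="command-r-plus-08-2024",
+    messages=[
+        {
+            "role": "user",
+            "content": "Suggest a name for a team standup Slack channel.",
+        }
+    ],
+    temperature=0.3,  # Lower values make the output more deterministic
+)
+
+print(response.message.content[0].text)
+```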
+
+### State Management
+To maintain the state of a conversation, for example when building chatbots, append a sequence of `user` and `assistant` messages to the `messages` list. You can also include a `system` message at the start of the list to set the context of the conversation.
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=[
+ {
+ "role": "system",
+ "content": "You respond in concise sentences."
+ },
+ {
+ "role": "user",
+ "content": "Hello"
+ },
+ {
+ "role": "assistant",
+ "content": "Hi, how can I help you today?"
+ },
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+ }
+ ]
+)
+
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=[
+ {
+ "role": "system",
+ "content": "You respond in concise sentences."
+ },
+ {
+ "role": "user",
+ "content": "Hello"
+ },
+ {
+ "role": "assistant",
+ "content": "Hi, how can I help you today?"
+ },
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+ }
+ ]
+)
+
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ messages=[
+ {
+ "role": "system",
+ "content": "You respond in concise sentences."
+ },
+ {
+ "role": "user",
+ "content": "Hello"
+ },
+ {
+ "role": "assistant",
+ "content": "Hi, how can I help you today?"
+ },
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+ }
+ ]
+)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ messages=[
+ {
+ "role": "system",
+ "content": "You respond in concise sentences."
+ },
+ {
+ "role": "user",
+ "content": "Hello"
+ },
+ {
+ "role": "assistant",
+ "content": "Hi, how can I help you today?"
+ },
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+ }
+ ]
+)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="model", # Pass a dummy string
+ messages=[
+ {
+ "role": "system",
+ "content": "You respond in concise sentences."
+ },
+ {
+ "role": "user",
+ "content": "Hello"
+ },
+ {
+ "role": "assistant",
+ "content": "Hi, how can I help you today?"
+ },
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates."
+ }
+ ]
+)
+```
+
+
+
+```mdx wordWrap
+"Excited to join the team at Co1t, looking forward to contributing my skills and collaborating with everyone!"
+```
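+
+To continue the conversation, append the assistant's reply to the `messages` list before adding the next `user` turn. A minimal sketch:
+
+```python PYTHON
+messages = [
+    {"role": "system", "content": "You respond in concise sentences."},
+    {"role": "user", "content": "Hello"},
+]
+response = co.chat(model="command-r-plus-08-2024", messages=messages)
+
+# Append the assistant's reply, then the next user turn
+messages.append(
+    {"role": "assistant", "content": response.message.content[0].text}
+)
+messages.append({"role": "user", "content": "Now say that more formally."})
+
+response = co.chat(model="command-r-plus-08-2024", messages=messages)
+print(response.message.content[0].text)
+```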
+
+### Streaming
+
+To stream text generation, call the Chat endpoint using `chat_stream` instead of `chat`. This returns a generator that yields `chunk` objects, from which you can access the generated text.
+
+
+
+```python PYTHON
+res = co.chat_stream(
+ model="command-r-plus-08-2024",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ]
+)
+
+for chunk in res:
+ if chunk:
+ if chunk.type == "content-delta":
+ print(chunk.delta.message.content.text, end="")
+```
+
+
+
+```python PYTHON
+res = co.chat_stream(
+ model="command-r-plus-08-2024",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ]
+)
+
+for chunk in res:
+ if chunk:
+ if chunk.type == "content-delta":
+ print(chunk.delta.message.content.text, end="")
+```
+
+
+
+```python PYTHON
+res = co.chat_stream(
+ model="YOUR_MODEL_NAME",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ]
+)
+
+for chunk in res:
+ if chunk:
+ if chunk.type == "content-delta":
+ print(chunk.delta.message.content.text, end="")
+```
+
+
+
+```python PYTHON
+res = co.chat_stream(
+ model="YOUR_ENDPOINT_NAME",
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ]
+)
+
+for chunk in res:
+ if chunk:
+ if chunk.type == "content-delta":
+ print(chunk.delta.message.content.text, end="")
+```
+
+
+
+```python PYTHON
+res = co.chat_stream(
+ model="model", # Pass a dummy string
+ messages=[
+ {
+ "role": "user",
+ "content": "I'm joining a new startup called Co1t today. Could you help me write a one-sentence introduction message to my teammates.",
+ }
+ ]
+)
+
+for chunk in res:
+ if chunk:
+ if chunk.type == "content-delta":
+ print(chunk.delta.message.content.text, end="")
+```
+
+
+
+```mdx wordWrap
+"Excited to be part of the Co1t team, I'm [Your Name], a [Your Role/Position], looking forward to contributing my skills and collaborating with this talented group to drive innovation and success."
+```
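+
+If you also need the complete response after streaming, accumulate the deltas as they arrive. A minimal sketch (the prompt is just an illustration):
+
+```python PYTHON
+res = co.chat_stream(
+    model="command-r-plus-08-2024",
+    messages=[{"role": "user", "content": "Write a one-line status update."}],
+)
+
+# Print each delta as it streams in, while keeping the full text
+full_text = ""
+for chunk in res:
+    if chunk and chunk.type == "content-delta":
+        delta = chunk.delta.message.content.text
+        print(delta, end="")
+        full_text += delta
+```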
+
+
+
+
+## Further Resources
+- [Chat endpoint API reference](https://docs.cohere.com/reference/chat)
+- [Documentation on text generation](https://docs.cohere.com/docs/introduction-to-text-generation-at-cohere)
+- [LLM University module on text generation](https://cohere.com/llmu#text-generation)
diff --git a/fern/pages/v2/get-started/quickstart/tool-use-quickstart.mdx b/fern/pages/v2/get-started/quickstart/tool-use-quickstart.mdx
new file mode 100644
index 000000000..1b577e7f9
--- /dev/null
+++ b/fern/pages/v2/get-started/quickstart/tool-use-quickstart.mdx
@@ -0,0 +1,353 @@
+---
+title: Tool Use & Agents
+slug: /docs/v2/tool-use-quickstart
+
+description: "A quickstart guide for using tool use and building agents with Cohere's Command models (v2 API)."
+image: "../../../../assets/images/f1cc130-cohere_meta_image.jpg"
+keywords: "Cohere, tool use, agents, chatbot, command models"
+---
+Tool use enables developers to build agentic applications that connect to external tools, reason, and perform actions.
+
+The Chat endpoint comes with built-in tool use capabilities such as function calling, multi-step reasoning, and citation generation.
+
+This quickstart guide shows you how to use tools with the Chat endpoint.
+
+
+### Setup
+First, install the Cohere Python SDK with the following command.
+
+```bash
+pip install -U cohere
+```
+
+Next, import the library and create a client. The code blocks below show the client setup for each platform: the Cohere Platform, private deployments, Bedrock, SageMaker, and Azure AI, in that order.
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2("COHERE_API_KEY") # Get your free API key here: https://dashboard.cohere.com/api-keys
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="", # Leave this blank
+ base_url=""
+)
+```
+
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.BedrockClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+
+# Get the model name: https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.SagemakerClientV2(
+ aws_region="AWS_REGION",
+ aws_access_key="AWS_ACCESS_KEY_ID",
+ aws_secret_key="AWS_SECRET_ACCESS_KEY",
+ aws_session_token="AWS_SESSION_TOKEN",
+)
+```
+
+
+
+
+```python PYTHON
+import cohere
+
+co = cohere.ClientV2(
+ api_key="AZURE_API_KEY",
+ base_url="AZURE_ENDPOINT",
+)
+```
+
+
+
+
+
+### Tool Definition
+First, we need to set up the tools. A tool can be any function or service that can receive and return objects.
+
+We also need to define the tool schemas in a format that can be passed to the Chat endpoint. The schema must contain the following fields: `name`, `description`, and `parameters`.
+
+
+```python PYTHON
+def get_weather(location):
+    # Implement your tool calling logic here.
+    # Return a list of objects, e.g. [{"url": "abc.com", "text": "..."}, {"url": "xyz.com", "text": "..."}]
+    return [{"temperature": "20C"}]
+
+functions_map = {"get_weather": get_weather}
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "get_weather",
+            "description": "Gets the weather of a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The location to get the weather for, example: San Francisco, CA",
+                    }
+                },
+                "required": ["location"],
+            },
+        },
+    },
+]
+```
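+
+Before wiring the tool into the model, you can sanity-check the implementation by calling it directly:
+
+```python PYTHON
+# Verify the tool returns the expected list-of-objects shape
+print(functions_map["get_weather"]("Toronto"))
+# [{'temperature': '20C'}]
+```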
+
+### Tool Calling
+Next, pass the tool schema to the Chat endpoint together with the user message.
+
+The LLM will then generate the tool calls (if any) and return the `tool_plan` and `tool_calls` objects.
+
+
+
+```python PYTHON
+messages = [{"role": "user", "content": "What's the weather in Toronto?"}]
+
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=messages,
+ tools=tools
+)
+
+if response.message.tool_calls:
+ messages.append(
+ {
+ "role": "assistant",
+ "tool_calls": response.message.tool_calls,
+ "tool_plan": response.message.tool_plan,
+ }
+ )
+ print(response.message.tool_calls)
+```
+
+
+
+```python PYTHON
+messages = [{"role": "user", "content": "What's the weather in Toronto?"}]
+
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=messages,
+ tools=tools
+)
+
+if response.message.tool_calls:
+ messages.append(
+ {
+ "role": "assistant",
+ "tool_calls": response.message.tool_calls,
+ "tool_plan": response.message.tool_plan,
+ }
+ )
+ print(response.message.tool_calls)
+```
+
+
+
+```python PYTHON
+messages = [{"role": "user", "content": "What's the weather in Toronto?"}]
+
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ messages=messages,
+ tools=tools
+)
+
+if response.message.tool_calls:
+ messages.append(
+ {
+ "role": "assistant",
+ "tool_calls": response.message.tool_calls,
+ "tool_plan": response.message.tool_plan,
+ }
+ )
+ print(response.message.tool_calls)
+```
+
+
+
+```python PYTHON
+messages = [{"role": "user", "content": "What's the weather in Toronto?"}]
+
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ messages=messages,
+ tools=tools
+)
+
+if response.message.tool_calls:
+ messages.append(
+ {
+ "role": "assistant",
+ "tool_calls": response.message.tool_calls,
+ "tool_plan": response.message.tool_plan,
+ }
+ )
+ print(response.message.tool_calls)
+```
+
+
+
+```python PYTHON
+messages = [{"role": "user", "content": "What's the weather in Toronto?"}]
+
+response = co.chat(
+ model="model", # Pass a dummy string
+ messages=messages,
+ tools=tools
+)
+
+if response.message.tool_calls:
+ messages.append(
+ {
+ "role": "assistant",
+ "tool_calls": response.message.tool_calls,
+ "tool_plan": response.message.tool_plan,
+ }
+ )
+ print(response.message.tool_calls)
+```
+
+
+
+
+```mdx wordWrap
+[ToolCallV2(id='get_weather_776n8ctsgycn', type='function', function=ToolCallV2Function(name='get_weather', arguments='{"location":"Toronto"}'))]
+```
+
+### Tool Execution
+Next, execute the tool calls using the arguments the model generated in the previous step.
+
+```python PYTHON
+import json
+
+if response.message.tool_calls:
+ for tc in response.message.tool_calls:
+ tool_result = functions_map[tc.function.name](
+ **json.loads(tc.function.arguments)
+ )
+ tool_content = []
+ for data in tool_result:
+            # Optional: add an "id" field to the "document" object; otherwise IDs are auto-generated
+            tool_content.append(
+                {"type": "document", "document": {"data": json.dumps(data)}}
+            )
+ messages.append(
+ {"role": "tool", "tool_call_id": tc.id, "content": tool_content}
+ )
+```
+
+### Response Generation
+The results are passed back to the LLM, which generates the final response.
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=messages,
+ tools=tools
+)
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="command-r-plus-08-2024",
+ messages=messages,
+ tools=tools
+)
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_MODEL_NAME",
+ messages=messages,
+ tools=tools
+)
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="YOUR_ENDPOINT_NAME",
+ messages=messages,
+ tools=tools
+)
+print(response.message.content[0].text)
+```
+
+
+
+```python PYTHON
+response = co.chat(
+ model="model", # Pass a dummy string
+ messages=messages,
+ tools=tools
+)
+print(response.message.content[0].text)
+```
+
+
+
+
+
+```mdx wordWrap
+It is 20C in Toronto.
+```
+
+### Citation Generation
+The response object includes a `citations` field, which lists the specific text spans from the documents on which the response is grounded.
+
+```python PYTHON
+if response.message.citations:
+ for citation in response.message.citations:
+ print(citation, "\n")
+```
+
+```mdx wordWrap
+start=6 end=9 text='20C' sources=[ToolSource(type='tool', id='get_weather_776n8ctsgycn:0', tool_output={'temperature': '20C'})]
+```
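+
+One way to surface these citations to end users is to insert markers into the response text using each citation's `start` and `end` offsets. A minimal sketch, iterating in reverse so earlier offsets stay valid:
+
+```python PYTHON
+text = response.message.content[0].text
+
+if response.message.citations:
+    # Work backwards so insertions don't shift the remaining offsets
+    for citation in sorted(
+        response.message.citations, key=lambda c: c.start, reverse=True
+    ):
+        text = text[: citation.end] + "]" + text[citation.end :]
+        text = text[: citation.start] + "[" + text[citation.start :]
+
+print(text)  # e.g. "It is [20C] in Toronto."
+```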
+
+
+## Further Resources
+- [Chat endpoint API reference](https://docs.cohere.com/reference/chat)
+- [Documentation on tool use](https://docs.cohere.com/docs/tools)
+- [LLM University module on tool use](https://cohere.com/llmu#tool-use)
diff --git a/fern/v1.yml b/fern/v1.yml
index f9fdc024c..aa15a7a5c 100644
--- a/fern/v1.yml
+++ b/fern/v1.yml
@@ -25,20 +25,26 @@ navigation:
layout:
- section: Get Started
contents:
- - page: The Cohere Platform
+ - page: Introduction
path: pages/get-started/the-cohere-platform.mdx
- - page: Introduction to Large Language Models
- path: pages/get-started/introduction-to-large-language-models.mdx
- - page: Developer Playground
+ - page: Installation
+ path: pages/get-started/installation.mdx
+ - section: Quickstart
+ contents:
+ - page: Text Generation
+ path: pages/get-started/quickstart/text-gen-quickstart.mdx
+ - page: RAG
+ path: pages/get-started/quickstart/rag-quickstart.mdx
+ - page: Tool Use & Agents
+ path: pages/get-started/quickstart/tool-use-quickstart.mdx
+ - page: Semantic Search
+ path: pages/get-started/quickstart/sem-search-quickstart.mdx
+ - page: Reranking
+ path: pages/get-started/quickstart/reranking-quickstart.mdx
+ - page: Playground
path: pages/get-started/playground-overview.mdx
- - page: Cohere FAQs
+ - page: FAQs
path: pages/get-started/frequently-asked-questions.mdx
- - page: Cohere Toolkit
- path: pages/get-started/cohere-toolkit.mdx
- - page: Datasets
- path: pages/get-started/datasets.mdx
- - page: Improve The Cohere Docs
- path: pages/get-started/contribute.mdx
- section: Models
contents:
- page: An Overview of Cohere's Models
@@ -314,6 +320,14 @@ navigation:
contents:
- page: Cohere For AI Acceptable Use Policy
path: pages/cohere-for-ai/c4ai-acceptable-use-policy.mdx
+ - section: More Resources
+ contents:
+ - page: Cohere Toolkit
+ path: pages/get-started/cohere-toolkit.mdx
+ - page: Datasets
+ path: pages/get-started/datasets.mdx
+ - page: Improve Cohere Docs
+ path: pages/get-started/contribute.mdx
# HIDDEN SECTION
- page: API Keys
hidden: true
diff --git a/fern/v2.yml b/fern/v2.yml
index e56fdc0df..99a360847 100644
--- a/fern/v2.yml
+++ b/fern/v2.yml
@@ -25,20 +25,26 @@ navigation:
layout:
- section: Get Started
contents:
- - page: The Cohere Platform
+ - page: Introduction
path: pages/get-started/the-cohere-platform.mdx
- - page: Introduction to Large Language Models
- path: pages/get-started/introduction-to-large-language-models.mdx
- - page: Developer Playground
+ - page: Installation
+ path: pages/get-started/installation.mdx
+ - section: Quickstart
+ contents:
+ - page: Text Generation
+ path: pages/v2/get-started/quickstart/text-gen-quickstart.mdx
+ - page: RAG
+ path: pages/v2/get-started/quickstart/rag-quickstart.mdx
+ - page: Tool Use & Agents
+ path: pages/v2/get-started/quickstart/tool-use-quickstart.mdx
+ - page: Semantic Search
+ path: pages/v2/get-started/quickstart/sem-search-quickstart.mdx
+ - page: Reranking
+ path: pages/v2/get-started/quickstart/reranking-quickstart.mdx
+ - page: Playground
path: pages/get-started/playground-overview.mdx
- - page: Cohere FAQs
+ - page: FAQs
path: pages/get-started/frequently-asked-questions.mdx
- - page: Cohere Toolkit
- path: pages/get-started/cohere-toolkit.mdx
- - page: Datasets
- path: pages/get-started/datasets.mdx
- - page: Improve The Cohere Docs
- path: pages/get-started/contribute.mdx
- section: Models
contents:
- page: An Overview of Cohere's Models
@@ -314,6 +320,14 @@ navigation:
contents:
- page: Cohere For AI Acceptable Use Policy
path: pages/cohere-for-ai/c4ai-acceptable-use-policy.mdx
+ - section: More Resources
+ contents:
+ - page: Cohere Toolkit
+ path: pages/get-started/cohere-toolkit.mdx
+ - page: Datasets
+ path: pages/get-started/datasets.mdx
+ - page: Improve Cohere Docs
+ path: pages/get-started/contribute.mdx
# HIDDEN SECTION
- page: API Keys
hidden: true