chore: bump LITS 0.9 #505

Merged · 9 commits · Feb 20, 2025
5 changes: 5 additions & 0 deletions .changeset/grumpy-snails-listen.md
@@ -0,0 +1,5 @@
---
"create-llama": patch
---

chore: bump LITS 0.9
112 changes: 90 additions & 22 deletions helpers/typescript.ts
@@ -6,7 +6,7 @@ import { assetRelocator, copy } from "../helpers/copy";
import { callPackageManager } from "../helpers/install";
import { templatesDir } from "./dir";
import { PackageManager } from "./get-pkg-manager";
-import { InstallTemplateArgs } from "./types";
+import { InstallTemplateArgs, ModelProvider, TemplateVectorDB } from "./types";

/**
* Install a LlamaIndex internal template to a given `root` directory.
@@ -27,6 +27,7 @@ export const installTSTemplate = async ({
dataSources,
useLlamaParse,
useCase,
modelConfig,
}: InstallTemplateArgs & { backend: boolean }) => {
console.log(bold(`Using ${packageManager}.`));

@@ -181,6 +182,12 @@ export const installTSTemplate = async ({
cwd: path.join(compPath, "loaders", "typescript", loaderFolder),
});

// copy provider settings
await copy("**", enginePath, {
parents: true,
cwd: path.join(compPath, "providers", "typescript", modelConfig.provider),
});

// Select and copy engine code based on data sources and tools
let engine;
tools = tools ?? [];
@@ -239,6 +246,8 @@ export const installTSTemplate = async ({
ui,
observability,
vectorDb,
backend,
modelConfig,
});

if (
@@ -249,6 +258,68 @@
}
};

const providerDependencies: {
[key in ModelProvider]?: Record<string, string>;
} = {
openai: {
"@llamaindex/openai": "^0.1.52",
},
gemini: {
"@llamaindex/google": "^0.0.7",
},
ollama: {
"@llamaindex/ollama": "^0.0.40",
},
mistral: {
"@llamaindex/mistral": "^0.0.5",
},
"azure-openai": {
"@llamaindex/openai": "^0.1.52",
},
groq: {
"@llamaindex/groq": "^0.0.51",
"@llamaindex/huggingface": "^0.0.36", // groq uses huggingface as default embedding model
},
anthropic: {
"@llamaindex/anthropic": "^0.1.0",
"@llamaindex/huggingface": "^0.0.36", // anthropic uses huggingface as default embedding model
},
};

const vectorDbDependencies: Record<TemplateVectorDB, Record<string, string>> = {
astra: {
"@llamaindex/astra": "^0.0.5",
},
chroma: {
"@llamaindex/chroma": "^0.0.5",
},
llamacloud: {},
milvus: {
"@zilliz/milvus2-sdk-node": "^2.4.6",
"@llamaindex/milvus": "^0.1.0",
},
mongo: {
mongodb: "6.7.0",
"@llamaindex/mongodb": "^0.0.5",
},
none: {},
pg: {
pg: "^8.12.0",
pgvector: "^0.2.0",
"@llamaindex/postgres": "^0.0.33",
},
pinecone: {
"@llamaindex/pinecone": "^0.0.5",
},
qdrant: {
"@qdrant/js-client-rest": "^1.11.0",
"@llamaindex/qdrant": "^0.1.0",
},
weaviate: {
"@llamaindex/weaviate": "^0.0.5",
},
};

async function updatePackageJson({
root,
appName,
@@ -258,6 +329,8 @@ async function updatePackageJson({
ui,
observability,
vectorDb,
backend,
modelConfig,
}: Pick<
InstallTemplateArgs,
| "root"
@@ -267,8 +340,10 @@
| "ui"
| "observability"
| "vectorDb"
| "modelConfig"
> & {
relativeEngineDestPath: string;
backend: boolean;
}): Promise<any> {
const packageJsonFile = path.join(root, "package.json");
const packageJson: any = JSON.parse(
@@ -308,32 +383,25 @@
};
}

if (vectorDb === "pg") {
if (backend) {
packageJson.dependencies = {
...packageJson.dependencies,
pg: "^8.12.0",
pgvector: "^0.2.0",
"@llamaindex/readers": "^2.0.0",
};
}

if (vectorDb === "qdrant") {
packageJson.dependencies = {
...packageJson.dependencies,
"@qdrant/js-client-rest": "^1.11.0",
};
}
if (vectorDb === "mongo") {
packageJson.dependencies = {
...packageJson.dependencies,
mongodb: "^6.7.0",
};
}
if (vectorDb && vectorDb in vectorDbDependencies) {
packageJson.dependencies = {
...packageJson.dependencies,
...vectorDbDependencies[vectorDb],
};
}

if (vectorDb === "milvus") {
packageJson.dependencies = {
...packageJson.dependencies,
"@zilliz/milvus2-sdk-node": "^2.4.6",
};
if (modelConfig.provider && modelConfig.provider in providerDependencies) {
packageJson.dependencies = {
...packageJson.dependencies,
...providerDependencies[modelConfig.provider],
};
}
}

if (observability === "traceloop") {
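The net effect of the `updatePackageJson` changes is that the per-database `if` chains become two table lookups. As an illustration (not part of the diff), a backend project generated with `vectorDb: "pg"` and `modelConfig.provider: "groq"` would end up with roughly these extra dependencies, using the versions pinned in the maps above:

```ts
// Illustrative only: the dependency set assembled for a hypothetical
// backend project with vectorDb = "pg" and modelConfig.provider = "groq".
const addedDependencies = {
  "@llamaindex/readers": "^2.0.0", // always added when backend === true
  // ...vectorDbDependencies["pg"]
  pg: "^8.12.0",
  pgvector: "^0.2.0",
  "@llamaindex/postgres": "^0.0.33",
  // ...providerDependencies["groq"]
  "@llamaindex/groq": "^0.0.51",
  "@llamaindex/huggingface": "^0.0.36", // groq's default embedding model
};
```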
2 changes: 1 addition & 1 deletion templates/components/loaders/typescript/file/loader.ts
@@ -1,7 +1,7 @@
import {
FILE_EXT_TO_READER,
SimpleDirectoryReader,
} from "llamaindex/readers/index";
} from "@llamaindex/readers/directory";

export const DATA_DIR = "./data";

4 changes: 2 additions & 2 deletions templates/components/loaders/typescript/llama_parse/loader.ts
@@ -1,8 +1,8 @@
-import { LlamaParseReader } from "llamaindex";
 import {
   FILE_EXT_TO_READER,
   SimpleDirectoryReader,
-} from "llamaindex/readers/index";
+} from "@llamaindex/readers/directory";
+import { LlamaParseReader } from "llamaindex";

export const DATA_DIR = "./data";

19 changes: 19 additions & 0 deletions templates/components/providers/typescript/anthropic/provider.ts
@@ -0,0 +1,19 @@
import {
ALL_AVAILABLE_ANTHROPIC_MODELS,
Anthropic,
} from "@llamaindex/anthropic";
import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
import { Settings } from "llamaindex";

export function setupProvider() {
const embedModelMap: Record<string, string> = {
"all-MiniLM-L6-v2": "Xenova/all-MiniLM-L6-v2",
"all-mpnet-base-v2": "Xenova/all-mpnet-base-v2",
};
Settings.llm = new Anthropic({
model: process.env.MODEL as keyof typeof ALL_AVAILABLE_ANTHROPIC_MODELS,
});
Settings.embedModel = new HuggingFaceEmbedding({
modelType: embedModelMap[process.env.EMBEDDING_MODEL!],
});
}
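Each provider module exports the same `setupProvider()` shape: it mutates the global LlamaIndex `Settings` rather than returning anything. A minimal usage sketch follows; the `./provider` import path is an assumption about where the copied file lands, not something shown in this diff:

```ts
import { Settings } from "llamaindex";
// Assumption: the generated engine code imports the copied provider
// module from a sibling file; the exact path depends on template layout.
import { setupProvider } from "./provider";

// e.g. with MODEL and EMBEDDING_MODEL set in the environment.
setupProvider();

// From here on, Settings.llm and Settings.embedModel are configured and
// are picked up implicitly by index and query code built on llamaindex.
```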
49 changes: 49 additions & 0 deletions templates/components/providers/typescript/azure-openai/provider.ts
@@ -0,0 +1,49 @@
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
import { Settings } from "llamaindex";

export function setupProvider() {
// Map Azure OpenAI model names to OpenAI model names (only for TS)
const AZURE_OPENAI_MODEL_MAP: Record<string, string> = {
"gpt-35-turbo": "gpt-3.5-turbo",
"gpt-35-turbo-16k": "gpt-3.5-turbo-16k",
"gpt-4o": "gpt-4o",
"gpt-4": "gpt-4",
"gpt-4-32k": "gpt-4-32k",
"gpt-4-turbo": "gpt-4-turbo",
"gpt-4-turbo-2024-04-09": "gpt-4-turbo",
"gpt-4-vision-preview": "gpt-4-vision-preview",
"gpt-4-1106-preview": "gpt-4-1106-preview",
"gpt-4o-2024-05-13": "gpt-4o-2024-05-13",
};

const azureConfig = {
apiKey: process.env.AZURE_OPENAI_KEY,
endpoint: process.env.AZURE_OPENAI_ENDPOINT,
apiVersion:
process.env.AZURE_OPENAI_API_VERSION || process.env.OPENAI_API_VERSION,
};

Settings.llm = new OpenAI({
model:
AZURE_OPENAI_MODEL_MAP[process.env.MODEL ?? "gpt-35-turbo"] ??
"gpt-3.5-turbo",
maxTokens: process.env.LLM_MAX_TOKENS
? Number(process.env.LLM_MAX_TOKENS)
: undefined,
azure: {
...azureConfig,
deployment: process.env.AZURE_OPENAI_LLM_DEPLOYMENT,
},
});

Settings.embedModel = new OpenAIEmbedding({
model: process.env.EMBEDDING_MODEL,
dimensions: process.env.EMBEDDING_DIM
? parseInt(process.env.EMBEDDING_DIM)
: undefined,
azure: {
...azureConfig,
deployment: process.env.AZURE_OPENAI_EMBEDDING_DEPLOYMENT,
},
});
}
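For reference, the Azure variant reads more environment configuration than the other providers: `AZURE_OPENAI_KEY`, `AZURE_OPENAI_ENDPOINT`, and `AZURE_OPENAI_API_VERSION` (falling back to `OPENAI_API_VERSION`) for the shared config, `AZURE_OPENAI_LLM_DEPLOYMENT` and `AZURE_OPENAI_EMBEDDING_DEPLOYMENT` for the two deployments, plus the common `MODEL`, `EMBEDDING_MODEL`, `EMBEDDING_DIM`, and `LLM_MAX_TOKENS` variables.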
16 changes: 16 additions & 0 deletions templates/components/providers/typescript/gemini/provider.ts
@@ -0,0 +1,16 @@
import {
Gemini,
GEMINI_EMBEDDING_MODEL,
GEMINI_MODEL,
GeminiEmbedding,
} from "@llamaindex/google";
import { Settings } from "llamaindex";

export function setupProvider() {
Settings.llm = new Gemini({
model: process.env.MODEL as GEMINI_MODEL,
});
Settings.embedModel = new GeminiEmbedding({
model: process.env.EMBEDDING_MODEL as GEMINI_EMBEDDING_MODEL,
});
}
18 changes: 18 additions & 0 deletions templates/components/providers/typescript/groq/provider.ts
@@ -0,0 +1,18 @@
import { Groq } from "@llamaindex/groq";
import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
import { Settings } from "llamaindex";

export function setupProvider() {
const embedModelMap: Record<string, string> = {
"all-MiniLM-L6-v2": "Xenova/all-MiniLM-L6-v2",
"all-mpnet-base-v2": "Xenova/all-mpnet-base-v2",
};

Settings.llm = new Groq({
model: process.env.MODEL!,
});

Settings.embedModel = new HuggingFaceEmbedding({
modelType: embedModelMap[process.env.EMBEDDING_MODEL!],
});
}
16 changes: 16 additions & 0 deletions templates/components/providers/typescript/mistral/provider.ts
@@ -0,0 +1,16 @@
import {
ALL_AVAILABLE_MISTRAL_MODELS,
MistralAI,
MistralAIEmbedding,
MistralAIEmbeddingModelType,
} from "@llamaindex/mistral";
import { Settings } from "llamaindex";

export function setupProvider() {
Settings.llm = new MistralAI({
model: process.env.MODEL as keyof typeof ALL_AVAILABLE_MISTRAL_MODELS,
});
Settings.embedModel = new MistralAIEmbedding({
model: process.env.EMBEDDING_MODEL as MistralAIEmbeddingModelType,
});
}
16 changes: 16 additions & 0 deletions templates/components/providers/typescript/ollama/provider.ts
@@ -0,0 +1,16 @@
import { Ollama, OllamaEmbedding } from "@llamaindex/ollama";
import { Settings } from "llamaindex";

export function setupProvider() {
const config = {
host: process.env.OLLAMA_BASE_URL ?? "http://127.0.0.1:11434",
};
Settings.llm = new Ollama({
model: process.env.MODEL ?? "",
config,
});
Settings.embedModel = new OllamaEmbedding({
model: process.env.EMBEDDING_MODEL ?? "",
config,
});
}
17 changes: 17 additions & 0 deletions templates/components/providers/typescript/openai/provider.ts
@@ -0,0 +1,17 @@
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
import { Settings } from "llamaindex";

export function setupProvider() {
Settings.llm = new OpenAI({
model: process.env.MODEL ?? "gpt-4o-mini",
maxTokens: process.env.LLM_MAX_TOKENS
? Number(process.env.LLM_MAX_TOKENS)
: undefined,
});
Settings.embedModel = new OpenAIEmbedding({
model: process.env.EMBEDDING_MODEL,
dimensions: process.env.EMBEDDING_DIM
? parseInt(process.env.EMBEDDING_DIM)
: undefined,
});
}
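The provider folders above line up one-to-one with the keys of `providerDependencies` in `helpers/typescript.ts`. The `ModelProvider` type itself lives in `helpers/types.ts` and is not shown in this diff; from the keys used here it presumably covers at least:

```ts
// Assumption: inferred from the providerDependencies keys in this PR;
// the authoritative definition is in helpers/types.ts (not in the diff).
type ModelProvider =
  | "openai"
  | "gemini"
  | "ollama"
  | "mistral"
  | "azure-openai"
  | "groq"
  | "anthropic";
```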